/* cache.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/

/*
  Global declarations: module-wide cache state guarded by cache_semaphore.
*/
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

/* -1 means "not yet probed"; presumably set elsewhere from policy/env —
   the probe is not visible in this chunk. */
static ssize_t
  cache_anonymous_memory = (-1);

static time_t
  cache_epoch = 0;

/*
  AcquirePixelCache() acquires a pixel cache.  The returned cache starts with
  a reference count of 1 and an UndefinedCache backing store; OpenPixelCache()
  (elsewhere in this file) later chooses memory/map/disk storage.

  The format of the AcquirePixelCache() method is:

      Cache AcquirePixelCache(const size_t number_threads)

  A description of each parameter follows:

    o number_threads: the number of nexus threads.  The effective count is
      raised to at least the OpenMP maximum and the ThreadResource limit,
      and never less than 1.
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);  /* no backing file yet */
  cache_info->id=GetMagickThreadId();
  /*
    One nexus per potential worker thread: take the max of the caller's
    request, the OpenMP thread count, and the thread resource limit.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    The security policy ("cache:synchronize") intentionally overrides the
    MAGICK_SYNCHRONIZE environment variable: it is read second.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  /*
    Clamp geometry limits so width/height always fit in a ssize_t.
  */
  cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=(GetLogEventMask() & CacheEvent) != 0 ? MagickTrue :
    MagickFalse;
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}

/*
  AcquirePixelCacheNexus() allocates the NexusInfo structure.

  The format of the AcquirePixelCacheNexus method is:

      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)

  A description of each parameter follows:

    o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  ssize_t
    i;

  /*
    Layout: one pointer array of 2*number_threads entries, all pointing into
    a single contiguous allocation of 2*number_threads NexusInfo structs.
    The first number_threads entries are the per-thread nexuses; the second
    half serves as their virtual_nexus companions.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads,
    2*sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}

/*
  AcquirePixelCachePixels() returns the pixels associated with the specified
  image.

  The format of the AcquirePixelCachePixels() method is:

      void *AcquirePixelCachePixels(const Image *image,size_t *length,
        ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o length: the pixel cache length.

    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;  /* reserved for future use; currently unreferenced */
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  /*
    Direct pixel access is only possible when the pixels live in addressable
    memory (heap or memory-mapped); disk/distributed caches return NULL.
  */
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((void *) NULL);
  *length=(size_t) cache_info->length;
  return(cache_info->pixels);
}

/*
  CacheComponentGenesis() instantiates the cache component.

  The format of the CacheComponentGenesis method is:

      MagickBooleanType CacheComponentGenesis(void)
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}

/*
  CacheComponentTerminus() destroys the cache component.
%
%  The format of the CacheComponentTerminus() method is:
%
%      CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /*
    If genesis never ran, activate the semaphore so the relinquish below has
    something valid to destroy.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}

/*
  ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
  mask.  The method returns MagickTrue if the pixel region is clipped,
  otherwise MagickFalse.

  The format of the ClipPixelCacheNexus() method is:

      MagickBooleanType ClipPixelCacheNexus(Image *image,
        NexusInfo *nexus_info,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o nexus_info: the cache nexus to clip.

    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask.  p walks the authentic (stored) pixels, q the nexus's
    freshly written pixels; masked channels of q are composited over p.
  */
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);  /* no write mask: nothing to clip */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          /*
            Blend each updatable channel of the new pixel (q) with the stored
            pixel (p) using the mask-scaled alpha.
          */
          for (i=0; i < (ssize_t) image->number_channels; i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
              GetPixelAlpha(image,p),(double) q[i],(double)
              GetPixelAlpha(image,q)));
          }
          SetPixelAlpha(image,GetPixelAlpha(image,p),q);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}

/*
  ClonePixelCache() clones a pixel cache.

  The format of the ClonePixelCache() method is:

      Cache ClonePixelCache(const Cache cache)

  A description of each parameter follows:

    o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  /*
    Create a fresh cache carrying over only the thread count and the virtual
    pixel method of the source; pixel storage is not copied here.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache ) clone);
}

/*
  ClonePixelCacheMethods() clones the pixel cache methods from one cache to
  another.

  The format of the ClonePixelCacheMethods() method is:

      void ClonePixelCacheMethods(Cache clone,const Cache cache)

  A description of each parameter follows:

    o clone: Specifies a pointer to a Cache structure.

    o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  /* NOTE(review): the names are inverted here — source_info aliases the
     `clone` argument and receives the methods from `cache`. */
  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  source_info->methods=cache_info->methods;
}

/*
  ClonePixelCacheRepository() clones the source pixel cache to the destination
  cache.

  The format of the ClonePixelCacheRepository() method is:

      MagickBooleanType ClonePixelCacheRepository(CacheInfo *clone_info,
        CacheInfo *cache_info,ExceptionInfo *exception)

  A description of each parameter follows:

    o clone_info: the pixel cache.

    o cache_info: the source pixel cache.

    o exception: return any errors or warnings in this structure.
*/

/*
  ClonePixelCacheOnDisk() copies a disk-backed cache byte-for-byte to another
  disk cache with identical morphology.  Returns MagickTrue on success.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    {
#if defined(MAGICKCORE_HAVE_LINUX_SENDFILE)
      /*
        Fast path: in-kernel copy.  0x7ffff000 is the per-call byte limit
        sendfile(2) documents; larger caches fall back to read/write below.
      */
      if (cache_info->length < 0x7ffff000)
        {
          count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL,
            (size_t) cache_info->length);
          if (count == (ssize_t) cache_info->length)
            return(MagickTrue);
          /* partial sendfile: rewind both files and retry with read/write */
          if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
              (lseek(clone_info->file,0,SEEK_SET) < 0))
            return(MagickFalse);
        }
#endif
      quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
    }
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;  /* short write: bail out; length check below reports failure */
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
/*
  Thread-count clause for the OpenMP pragmas below: single-threaded when
  multithreaded==0, at most 2 threads when either cache is file-backed
  (I/O bound), otherwise scaled by the row count.
*/
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);  /* ping caches hold no pixels */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology: bulk copy.
      */
      if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row through nexuses,
    remapping channels when the channel maps differ.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* cannot break out of an OpenMP loop; skip remaining rows */
    if (y >= (ssize_t) clone_info->rows)
      continue;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        const Quantum
          *magick_restrict p;

        Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: gather each destination channel from
          its offset in the source pixel.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent, row by row.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
  DestroyImagePixelCache() deallocates memory associated with the pixel cache.

  The format of the DestroyImagePixelCache() method is:

      void DestroyImagePixelCache(Image *image)

  A description of each parameter follows:

    o image: the image.
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache != (void *) NULL)
    image->cache=DestroyPixelCache(image->cache);
}

/*
  DestroyImagePixels() deallocates memory associated with the pixel cache.

  The format of the DestroyImagePixels() method is:

      void DestroyImagePixels(Image *image)

  A description of each parameter follows:

    o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* honor an installed destroy handler (e.g. a user cache-method override) */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}

/*
  DestroyPixelCache() deallocates memory associated with the pixel cache.

  The format of the DestroyPixelCache() method is:

      Cache DestroyPixelCache(Cache cache)

  A description of each parameter follows:

    o cache: the pixel cache.
*/

/*
  ClosePixelCacheOnDisk() closes the cache's backing file descriptor and
  releases its file resource.  Returns MagickFalse if no file was open or
  close(2) failed.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ? MagickFalse : MagickTrue);
}

/*
  RelinquishPixelCachePixels() releases the backing store (memory, map, disk,
  or distributed) of the cache and resets it to UndefinedCache.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE(review): no break above — MapCache falls through to DiskCache so
       the backing file descriptor also gets closed; confirm intentional. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}

MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference; only the last reference tears the cache down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickCoreSignature);  /* poison against reuse */
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
  DestroyPixelCacheNexus() destroys a pixel cache nexus.

  The format of the DestroyPixelCacheNexus() method is:

      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
        const size_t number_threads)

  A description of each parameter follows:

    o nexus_info: the nexus to destroy.

    o number_threads: the number of nexus threads.
%
*/

/*
  RelinquishCacheNexusPixels() frees (or unmaps) one nexus's staging buffer
  and clears its bookkeeping fields.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped == MagickFalse)
    (void) RelinquishAlignedMemory(nexus_info->cache);
  else
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}

MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  ssize_t
    i;

  assert(nexus_info != (NexusInfo *) NULL);
  /*
    2*number_threads entries exist: the per-thread nexuses plus their
    virtual_nexus companions (see AcquirePixelCacheNexus).
  */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /* all structs live in one contiguous block anchored at *nexus_info */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}

/*
  GetAuthenticMetacontent() returns the authentic metacontent corresponding
  with the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
  returned if the associated pixels are not available.

  The format of the GetAuthenticMetacontent() method is:

      void *GetAuthenticMetacontent(const Image *image)

  A description of each parameter follows:

    o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to the installed cache method when one is registered (e.g. a
    distributed cache handler); otherwise fall through to the per-thread
    nexus.
  */
  if (cache_info->methods.get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      void
        *metacontent;

      metacontent=cache_info->methods.
        get_authentic_metacontent_from_handler(image);
      return(metacontent);
    }
  /*
    Each OpenMP thread owns its own nexus; the thread id indexes it.
  */
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e          %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticMetacontentFromCache() returns the meta-content corresponding
%  with the last call to QueueAuthenticPixelsCache() or
%  GetAuthenticPixelsCache().
%
%  The format of the GetAuthenticMetacontentFromCache() method is:
%
%      void *GetAuthenticMetacontentFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  if ((cache_info->type == UndefinedCache) ||
      (cache_info->reference_count > 1))
    {
      /*
        Force a private, fully-opened cache before exposing its pixels to
        OpenCL; SyncImagePixelCache may replace image->cache, so re-read it.
      */
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /*
    Only non-mapped in-core caches can back an OpenCL buffer.
  */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /*
    An existing OpenCL cache bound to a different device context cannot be
    reused directly; copy it first.
  */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /*
    Bump the buffer's reference count inside the lock so the caller owns a
    reference once the semaphore is released.
  */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t A u t h e n t i c P i x e l C a c h e N e x u s                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.   A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetAuthenticPixelCacheNexus() method is:
%
%      Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to return.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Stage (or map in-place) the requested region in the nexus first.
  */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    When the nexus points directly into the authentic pixel cache there is
    nothing to read -- the pixels are already live.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  /*
    Metacontent is read only when the cache carries any.
  */
  if (cache_info->metacontent_extent != 0)
    if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)
      return((Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t A u t h e n t i c P i x e l s F r o m C a c h e                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsFromCache() returns the pixels associated with the last
%  call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  /*
    Return the pixels staged in the calling thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated
%  corresponding with the last call to QueueAuthenticPixels() or
%  GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to the installed cache method when one is registered; otherwise
    return the calling thread's nexus pixels.
  */
  if (cache_info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access.  If the
%  region is successfully accessed, a pointer to a Quantum array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file.  The returned pointer
%  must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image has corresponding metacontent,call
%  GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
%  meta-content corresponding to the region.  Once the Quantum array has
%  been updated, the changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to the installed cache method when one is registered; otherwise
    read through the calling thread's nexus.
  */
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    {
      pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
        rows,exception);
      return(pixels);
    }
  assert(id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t A u t h e n t i c P i x e l s C a c h e                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
%  as defined by the geometry parameters.   A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetAuthenticPixelsCache() method is:
%
%      Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  /*
    Defensive NULL check kept for release builds where the assert above is
    compiled out.
  */
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t I m a g e E x t e n t                                              %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtent() returns the extent of the pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetImageExtent() method is:
%
%      MagickSizeType GetImageExtent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    The extent is that of the calling thread's nexus.
  */
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t I m a g e P i x e l C a c h e                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Does the image match the pixel cache morphology?  Every geometric and
    channel property must agree, including the channel maps byte-for-byte.
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->channels != cache_info->channels) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) ||
      (image->metacontent_extent != cache_info->metacontent_extent) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  /*
    Optionally throttle the CPU: sleep every 32nd invocation when a throttle
    resource limit is set.
  */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /*
        Time resource limit exceeded: close any on-disk cache file and abort.
      */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: if the cache is shared or opened read-only, clone it so
    this image owns a private, writable copy.  The test is repeated under the
    cache semaphore (double-checked) to avoid racing another thread.
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.  A stack copy of the image carries the clone
            through OpenPixelCache without touching the caller's image.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /*
                    Success: the image adopts the clone; the original cache is
                    destroyed below, outside the cache semaphore.
                  */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t I m a g e P i x e l C a c h e T y p e                              %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MemoryCache, MapCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  ssize_t
    i;

  /*
    Copy one pixel into the caller's channel-indexed buffer.  A NULL source
    signals an error: the image background color is substituted and
    MagickFalse is returned.
  */
  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  /*
    Map each physical channel of the source to its logical channel slot in
    the destination (destination is indexed by PixelChannel, not position).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    destination[channel]=source[i];
  }
  return(MagickTrue);
}

MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Zero the full channel buffer so channels the image lacks read as 0.
  */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t O n e A u t h e n t i c P i x e l F r o m C a c h e                %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
%  location.
  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(const Image *image,
%        const ssize_t x,const ssize_t y,Quantum *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    Zero the full channel buffer, then fetch a 1x1 region through the calling
    thread's nexus; CopyPixel falls back to the background color on error.
  */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixel() returns a single virtual pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%  If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Zero the full channel buffer so channels the image lacks read as 0.
  */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  /*
    Virtual access: out-of-bounds coordinates are resolved by the image's
    current virtual pixel method.
  */
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t O n e V i r t u a l P i x e l F r o m C a c h e                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    Zero the full channel buffer, then fetch a 1x1 virtual region through the
    calling thread's nexus.
  */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l I n f o                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y: these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    Initialize the PixelInfo from the image, then overwrite it with the
    fetched virtual pixel.  Unlike the Quantum variants, failure leaves the
    initialized PixelInfo and returns MagickFalse.
  */
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t P i x e l C a c h e C o l o r s p a c e                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      Colorspace GetPixelCacheColorspace(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate the method table with the default (local pixel cache) handlers.
    Start from all-zeros so any handler not set here is NULL.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) pixel access.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /*
    Authentic (read/write) pixel access.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /*
    Queue/sync/teardown.
  */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t P i x e l C a c h e N e x u s E x t e n t                          %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  corresponding with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Cast the NULL sentinel to Cache for consistency with the other cache
    accessors in this file (e.g. GetPixelCacheColorspace).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    The nexus extent is its region area in pixels; an empty region means the
    nexus covers the whole cache, so fall back to columns*rows.
  */
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
% +  G e t P i x e l C a c h e P i x e l s                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *magick_unused(exception)) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); magick_unreferenced(exception); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. 
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    extent;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Size square tiles from a per-row byte budget: 2048 bytes for in-core
    caches, 8192 bytes for disk caches.
  */
  extent=2048UL;
  if (GetImagePixelCacheType(image) == DiskCache)
    extent=8192UL;
  *width=extent/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. 
%
%  The format of the GetVirtualMetacontentFromNexus() method is:
%
%      const void *GetVirtualMetacontentFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    An uninitialized cache (UndefinedClass) has no meta-content to expose.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->storage_class == UndefinedClass ? (const void *) NULL :
    nexus_info->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l M e t a c o n t e n t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL
%  is returned if the meta-content are not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict meta;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer the installed handler; fall back to the per-thread nexus when
    the handler yields nothing.
  */
  meta=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (meta != (void *) NULL)
    return(meta);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l C a c h e N e x u s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.  A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetVirtualPixelCacheNexus() method is:
%
%      Quantum *GetVirtualPixelCacheNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  8x8 ordered-dither offsets used by the Dither virtual pixel method to
  remap out-of-bounds coordinates pseudo-randomly but deterministically.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

/*
  Map x to a dithered in-bounds column index; result is clamped to
  [0, columns-1].
*/
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(index);
}

/*
  Map y to a dithered in-bounds row index; result is clamped to [0, rows-1].
*/
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(index);
}

/*
  Clamp x to the valid column range [0, columns-1] (Edge method).
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

/*
  Clamp y to the valid row range [0, rows-1] (Edge method).
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/*
  Uniformly random in-bounds column index (Random method).
*/
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}

/*
  Uniformly random in-bounds row index (Random method).
*/
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}

/*
  Floored division of offset by extent: remainder is always in [0, extent)
  even for negative offsets, so Tile/Mirror wrapping is continuous across
  zero.  The sign-mismatch test adjusts the truncated C quotient/remainder.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  modulo.quotient=offset;
  modulo.remainder=0;
  if (extent != 0)
    {
      modulo.quotient=offset/((ssize_t) extent);
      modulo.remainder=offset % ((ssize_t) extent);
    }
  if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=((ssize_t) extent);
    }
  return(modulo);
}

MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  const Quantum
    *magick_restrict p;

  const void
    *magick_restrict r;

  Quantum
    *magick_restrict q;

  ssize_t
    i,
    u;

  unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Stage the nexus buffer; a clone is forced when a write/composite mask
    is active so masked reads never alias authentic pixels.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      /*
        Precompute the constant fill pixel used for out-of-bounds accesses.
      */
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /* BackgroundVirtualPixelMethod and the remaining cases. */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Walk the requested region row by row; in-bounds spans are transferred
    as runs, out-of-bounds positions one pixel at a time.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Lazily create the RNG on first random virtual access. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tile repetitions are reflected about the image edge. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant-fill methods use the precomputed virtual pixel. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) ||
                  (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            NOTE(review): this copies `length` bytes but advances `s` by
            length*metacontent_extent — presumably the copy should cover
            length*metacontent_extent bytes as well; verify against the
            single-pixel branch above before changing.
          */
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l C a c h e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
%  cache as defined by the geometry parameters.
A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const Quantum *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Thin wrapper: delegate to GetVirtualPixelCacheNexus() using the nexus
    reserved for the calling OpenMP thread.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l Q u e u e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelsHandler
    handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer the installed handler; otherwise read the per-thread nexus.
  */
  handler=cache_info->methods.get_virtual_pixels_handler;
  if (handler != (GetVirtualPixelsHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since there is opportunity to access the pixels
%  in-place (without a copy) if the image is in memory, or in a memory-mapped
%  file.  The returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the
%  region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not
%  thread-safe.  In a threaded environment, use GetCacheViewVirtualPixels()
%  or GetCacheViewAuthenticPixels() instead.
%
%  The format of the GetVirtualPixels() method is:
%
%      const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  VirtualPixelMethod
    virtual_pixel_method;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Route through the installed handler when one is registered; otherwise
    read directly from the per-thread nexus.
  */
  virtual_pixel_method=GetPixelCacheVirtualMethod(image);
  if (cache_info->methods.get_virtual_pixel_handler !=
      (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      virtual_pixel_method,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,
    rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l s F r o m C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the
%  last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualPixelsCache() method is:
%
%      Quantum *GetVirtualPixelsCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. 
%
%  The format of the MaskPixelCacheNexus() method is:
%
%      MagickBooleanType MaskPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Blend q over p weighted by the composite-mask value `alpha` and the
  pixel alpha `beta`; a fully transparent mask returns q unchanged.
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;

  if (fabs((double) (alpha-TransparentAlpha)) < MagickEpsilon)
    return(q);
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  gamma=PerceptibleReciprocal(gamma);
  return(ClampToQuantum(gamma*MagickOver_((double) p,alpha,(double) q,beta)));
}

static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No-op when the image carries no composite mask or the region is empty. */
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p: authentic (unmasked) pixels; q: the nexus pixels to be masked. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        alpha;

      ssize_t
        i;

      alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Only channels flagged for update participate in the blend. */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyPixelCompositeMask(q[i],alpha,p[i],GetPixelAlpha(image,p));
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p e n P i x e l C a c h e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenPixelCache() allocates the pixel cache.  This includes defining the
%  cache dimensions, allocating space for the image pixels and optionally
%  the metacontent, and memory mapping the cache if it is disk based.  The
%  cache nexus array is initialized as well.
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Open (or re-open) the on-disk pixel cache file in the requested mode.
  A unique temporary file is created when no cache filename exists yet;
  otherwise the file is opened read-only, write-only, or read/write.
  Returns MagickFalse when the file cannot be opened.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to opening an existing file. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,
            S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* Close any previously open descriptor before adopting the new one. */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}

/*
  Write `length` bytes of `buffer` to the cache file at `offset`, retrying
  on EINTR and resuming after partial writes.  Returns the number of bytes
  written, or -1 when the initial seek fails (non-pwrite platforms).
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /* Retry interrupted writes; abandon on any other error. */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

/*
  Grow the on-disk cache file to at least `length` bytes by writing a
  single byte at length-1 (sparse extension), optionally reserving the
  blocks with posix_fallocate() when synchronize is requested.  Leaves the
  file position at the start of the file.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (cache_info->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that do not survive the round-trip to a signed offset. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;
  else
    {
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
*/ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (((MagickSizeType) image->columns > cache_info->width_limit) || ((MagickSizeType) image->rows > cache_info->height_limit)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity) { length=GetImageListLength(image); if (AcquireMagickResource(ListLengthResource,length) == MagickFalse) ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit", image->filename); } source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) image->scene); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->channels=image->channels; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; 
packet_size=MagickMax(cache_info->number_channels,1)*sizeof(Quantum); if (image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,(MagickSizeType) cache_info->columns*cache_info->rows); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)) && ((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache))) { status=AcquireMagickResource(MemoryResource,cache_info->length); if (status != MagickFalse) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; } else { /* Create memory pixel cache. 
*/ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (cache_info->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=AcquireMagickResource(DiskResource,cache_info->length); hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts", exception); if ((status == MagickFalse) && (hosts != (const char *) NULL)) { DistributeCacheInfo *server_info; /* Distribute the pixel cache to a remote server. */ server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (cache_info->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } /* Create pixel cache on disk. 
*/ if (status == MagickFalse) { cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } cache_info->type=DiskCache; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length == (MagickSizeType) ((size_t) length)) { status=AcquireMagickResource(MapResource,cache_info->length); if (status != MagickFalse) { cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; RelinquishMagickResource(MapResource,cache_info->length); } else { /* Create file-backed memory-mapped pixel cache. 
*/ (void) ClosePixelCacheOnDisk(cache_info); cache_info->type=MapCache; cache_info->mapped=MagickTrue; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (cache_info->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, cache_info->file,type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=MagickTrue; if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info,exception); RelinquishPixelCachePixels(&source_info); } if (cache_info->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r s i s t P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PersistPixelCache() attaches to or initializes a persistent pixel cache. A % persistent pixel cache is one that resides on disk and is not destroyed % when the program exits. % % The format of the PersistPixelCache() method is: % % MagickBooleanType PersistPixelCache(Image *image,const char *filename, % const MagickBooleanType attach,MagickOffsetType *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filename: the persistent pixel cache filename. % % o attach: A value other than zero initializes the persistent pixel cache. % % o initialize: A value other than zero initializes the persistent pixel % cache. % % o offset: the offset in the persistent cache to store pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType PersistPixelCache(Image *image, const char *filename,const MagickBooleanType attach,MagickOffsetType *offset, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, *magick_restrict clone_info; MagickBooleanType status; ssize_t page_size; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (void *) NULL); assert(filename != (const char *) NULL); assert(offset != (MagickOffsetType *) NULL); page_size=GetMagickPageSize(); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif if (attach != MagickFalse) { /* Attach existing persistent pixel cache. 
*/ if (cache_info->debug != MagickFalse) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "attach persistent cache"); (void) CopyMagickString(cache_info->cache_filename,filename, MagickPathExtent); cache_info->type=MapCache; cache_info->offset=(*offset); if (OpenPixelCache(image,ReadMode,exception) == MagickFalse) return(MagickFalse); *offset+=cache_info->length+page_size-(cache_info->length % page_size); return(MagickTrue); } /* Clone persistent pixel cache. */ status=AcquireMagickResource(DiskResource,cache_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } clone_info=(CacheInfo *) ClonePixelCache(cache_info); clone_info->type=DiskCache; (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent); clone_info->file=(-1); clone_info->storage_class=cache_info->storage_class; clone_info->colorspace=cache_info->colorspace; clone_info->alpha_trait=cache_info->alpha_trait; clone_info->channels=cache_info->channels; clone_info->columns=cache_info->columns; clone_info->rows=cache_info->rows; clone_info->number_channels=cache_info->number_channels; clone_info->metacontent_extent=cache_info->metacontent_extent; clone_info->mode=PersistMode; clone_info->length=cache_info->length; (void) memcpy(clone_info->channel_map,cache_info->channel_map, MaxPixelChannels*sizeof(*cache_info->channel_map)); clone_info->offset=(*offset); status=OpenPixelCacheOnDisk(clone_info,WriteMode); if (status != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info,exception); *offset+=cache_info->length+page_size-(cache_info->length % page_size); clone_info=(CacheInfo *) DestroyPixelCache(clone_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelCacheNexus() method is: % % Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % const MagickBooleanType clone,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to set. % % o clone: clone the pixel cache. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType number_pixels; Quantum *magick_restrict pixels; /* Validate pixel cache geometry. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception); if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) || (y < 0) || (x >= (ssize_t) cache_info->columns) || (y >= (ssize_t) cache_info->rows)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "PixelsAreNotAuthentic","`%s'",image->filename); return((Quantum *) NULL); } offset=(MagickOffsetType) y*cache_info->columns+x; if (offset < 0) return((Quantum *) NULL); number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1; if ((MagickSizeType) offset >= number_pixels) return((Quantum *) NULL); /* Return pixel cache. */ pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelsCache() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. 
% % The format of the QueueAuthenticPixelsCache() method is: % % Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u e u e A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixels() queues a mutable pixel region. If the region is % successfully initialized a pointer to a Quantum array representing the % region is returned, otherwise NULL is returned. The returned pointer may % point to a temporary working buffer for the pixels or it may point to the % final location of the pixels in memory. % % Write-only access means that any existing pixel values corresponding to % the region are ignored. This is useful if the initial image is being % created from scratch, or if the existing pixel values are to be % completely replaced without need to refer to their pre-existing values. 
% The application is free to read and write the pixel buffer returned by % QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not % initialize the pixel array values. Initializing pixel array values is the % application's responsibility. % % Performance is maximized if the selected region is part of one row, or % one or more full rows, since then there is opportunity to access the % pixels in-place (without a copy) if the image is in memory, or in a % memory-mapped file. The returned pointer must *never* be deallocated % by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to % obtain the meta-content (of type void) corresponding to the region. % Once the Quantum (and/or Quantum) array has been updated, the % changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the QueueAuthenticPixels() method is: % % Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y, columns,rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheMetacontent() reads metacontent from the specified region of % the pixel cache. % % The format of the ReadPixelCacheMetacontent() method is: % % MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the metacontent. % % o exception: return any errors or warnings in this structure. 
% */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) MAGICK_SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) MAGICK_SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheMetacontent( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; ssize_t y; unsigned char *magick_restrict q; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; q=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { unsigned char *magick_restrict p; /* Read meta-content from memory. 
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->metacontent_extent*cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } break; } case DiskCache: { /* Read meta content from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read metacontent from distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. 
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  Quantum
    *magick_restrict q;

  ssize_t
    y;

  size_t
    number_channels,
    rows;

  /*
    Transfer the nexus region from the backing store (heap memory, memory-
    mapped file, disk file, or distributed cache server) into the nexus
    staging buffer.  Returns MagickFalse on overflow or a short transfer.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to do */
  /*
    Compute the pixel offset of the region origin.  Each multiplication is
    verified by dividing back and comparing, to reject integer overflow.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Region rows are contiguous in the cache: collapse the row loop
            into a single memcpy().
          */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;  /* short read: reported below via y < rows */
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per server request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /*
        A transfer loop terminated early: report the short read.
      */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Cache is a pointer typedef, so compare against (Cache) NULL -- the
    original (Cache *) NULL cast compared against the wrong pointer type.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   P i x e l   C a c h e   C h a n n e l s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Keep the cache's channel count in sync with the image's pixel channels
    (e.g. after the channel layout of the image changes).
  */
  cache_info->number_channels=GetPixelChannels(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   C a c h e   A n o n y m o u s   M e m o r y                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
%  The format of the ResetCacheAnonymousMemory method is:
%
%      void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /* File-scope policy flag; 0 re-triggers policy lookup on next use. */
  cache_anonymous_memory=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t   P i x e l   C a c h e   E p o c h                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheEpoch() resets the pixel cache epoch.
%
%  The format of the ResetPixelCacheEpoch method is:
%
%      void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* File-scope timestamp used to age cached resources. */
  cache_epoch=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t   P i x e l   C a c h e   M e t h o d s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
%  The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  Only handlers that are non-NULL in
    cache_methods replace the corresponding cache_info handler.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    Fix: test the INCOMING handler (cache_methods), not the handler already
    installed in cache_info.  The original read
    cache_info->methods.get_one_virtual_pixel_from_handler here, which let a
    NULL incoming handler clobber a valid installed one and never installed
    a handler when none was present -- inconsistent with every other
    assignment above and with the authentic counterpart below.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t   P i x e l   C a c h e   N e x u s   P i x e l s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum SetPixelCacheNexusPixels(
%        const CacheInfo *magick_restrict cache_info,const MapMode mode,
%        const ssize_t x,const ssize_t y,const size_t width,const size_t height,
%        const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o x,y,width,height: define the region of this particular cache nexus.
%
%    o buffered: if true, nexus pixels are buffered.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Allocate (or memory-map) the nexus staging buffer of `length' bytes.
  Zero-fills heap allocations; returns MagickFalse on overflow of size_t
  or allocation failure.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /* Reject lengths that do not fit in size_t (allocator argument type). */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      /* Heap path: aligned allocation, zero-initialized. */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      /* Policy requested anonymous-mapped memory instead of the heap. */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}

/*
  Hint the CPU to prefetch the nexus pixels for read (mode == ReadMode) or
  write (any other mode).  No-op for buffers smaller than a cache line.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
        0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1);
}

/*
  Return MagickTrue if offset x plus extent a can be represented as ssize_t
  without overflow in either direction.
*/
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  if ((x >= 0) && (x >= ((ssize_t) MAGICK_SSIZE_MAX-(ssize_t) a)))
    return(MagickFalse);
  if (x <= ((ssize_t) MAGICK_SSIZE_MIN+(ssize_t) a))
    return(MagickFalse);
  return(MagickTrue);
}

static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  /*
    Reject regions beyond the configured limits or whose offsets would
    overflow ssize_t arithmetic.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        The region lies fully inside the cache and is either a run of whole
        rows or a single-row span, so the nexus can alias the cache pixels
        directly -- no staging buffer and no later sync needed.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  /*
    NOTE(review): the buffer is over-allocated to at least one full cache
    row/column of pixels, presumably so row-at-a-time transfers always fit --
    confirm against the transfer helpers.
  */
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer too small: grow it. */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);  /* metacontent follows pixels */
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   P i x e l   C a c h e   V i r t u a l   M e t h o d               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.  A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Set the alpha component of every pixel to `alpha' and mark the image as
  blending.  Helper for SetPixelCacheVirtualMethod() when a virtual pixel
  method requires an alpha channel to exist.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another thread failed; skip remaining rows */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;  /* previous setting, returned below */
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /*
          Background color may carry alpha or color the image cannot yet
          represent: enable alpha / widen colorspace as needed.
        */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* Transparent virtual pixels require an alpha channel. */
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   O p e n C L   B u f f e r                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
%  been completed and updates the host memory.
%
%  The format of the SyncAuthenticOpenCLBuffer() method is:
%
%      void SyncAuthenticOpenCLBuffer(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e   N e x u s           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (image->mask_trait != UpdatePixelTrait)
    {
      /*
        Apply write/composite masks before the pixels reach the cache
        (skipped while the masks themselves are being updated).
      */
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* Nexus aliases the cache: pixels are already in place. */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
%  or disk cache.  The method returns MagickTrue if the pixel region is synced,
%  otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Sync this thread's per-thread nexus. */
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c   A u t h e n t i c   P i x e l s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    {
      /* Delegate to an installed handler (e.g. a coder override). */
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c   I m a g e   P i x e l   C a c h e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* GetImagePixelCache() with clone==MagickTrue performs the sync. */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   P i x e l   C a c h e   M e t a c o n t e n t                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheMetacontent() writes the meta-content to the specified region
%  of the pixel cache.
%
%  The format of the WritePixelCacheMetacontent() method is:
%
%      MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the meta-content.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const unsigned char
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache; already in place */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Whole rows are contiguous: single memcpy(). */
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        `extent' is repurposed here: total pixel area of the cache, used to
        locate the metacontent section that follows the pixel data on disk.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* one row per server request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* A transfer loop terminated early: report the short write. */
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e   C a c h e   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const Quantum
    *magick_restrict p;

  ssize_t
    y;

  size_t
    number_channels,
    rows;

  /*
    Transfer the nexus staging buffer to the backing store (heap memory,
    memory-mapped file, disk file, or distributed cache server).
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache; pixels already in place */
  /*
    Verify the offset/length/extent arithmetic with divide-back checks,
    mirroring ReadPixelCachePixels(); the original write path omitted these
    overflow guards.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Whole rows are contiguous in the cache: single memcpy(). */
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;  /* short write: reported below via y < rows */
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* one row per server request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* A transfer loop terminated early: report the short write. */
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
deprecate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickCore Deprecated Methods % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #if defined(MAGICKCORE_WINDOWS_SUPPORT) #define WIN32_LEAN_AND_MEAN #define VC_EXTRALEAN #include <windows.h> #endif #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/geometry.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/morphology.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/semaphore-private.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/threshold.h" #include "magick/thread_.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/transform.h" #include "magick/utility.h" #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* Global declarations. 
*/ static MonitorHandler monitor_handler = (MonitorHandler) NULL; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e C a c h e V i e w I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireCacheViewIndexes() returns the indexes associated with the specified % view. % % Deprecated, replace with: % % GetCacheViewVirtualIndexQueue(cache_view); % % The format of the AcquireCacheViewIndexes method is: % % const IndexPacket *AcquireCacheViewIndexes(const CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % */ MagickExport const IndexPacket *AcquireCacheViewIndexes( const CacheView *cache_view) { return(GetCacheViewVirtualIndexQueue(cache_view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e C a c h e V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireCacheViewPixels() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % Deprecated, replace with: % % GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception); % % The format of the AcquireCacheViewPixels method is: % % const PixelPacket *AcquireCacheViewPixels(const CacheView *cache_view, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport const PixelPacket *AcquireCacheViewPixels( const CacheView *cache_view,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,ExceptionInfo *exception) { return(GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImagePixels() returns an immutable pixel region. If the % region is successfully accessed, a pointer to it is returned, otherwise % NULL is returned. The returned pointer may point to a temporary working % copy of the pixels or it may point to the original pixels in memory. % Performance is maximized if the selected region is part of one row, or one % or more full rows, since there is opportunity to access the pixels in-place % (without a copy) if the image is in RAM, or in a memory-mapped file. The % returned pointer should *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % PixelPacket. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access % the black color component or to obtain the colormap indexes (of type % IndexPacket) corresponding to the region. % % If you plan to modify the pixels, use GetAuthenticPixels() instead. % % Note, the AcquireImagePixels() and GetAuthenticPixels() methods are not % thread-safe. In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead. 
% % Deprecated, replace with: % % GetVirtualPixels(image,x,y,columns,rows,exception); % % The format of the AcquireImagePixels() method is: % % const PixelPacket *AcquireImagePixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const PixelPacket *AcquireImagePixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,ExceptionInfo *exception) { return(GetVirtualPixels(image,x,y,columns,rows,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireIndexes() returns the black channel or the colormap indexes % associated with the last call to QueueAuthenticPixels() or % GetVirtualPixels(). NULL is returned if the black channel or colormap % indexes are not available. % % Deprecated, replace with: % % GetVirtualIndexQueue(image); % % The format of the AcquireIndexes() method is: % % const IndexPacket *AcquireIndexes(const Image *image) % % A description of each parameter follows: % % o indexes: AcquireIndexes() returns the indexes associated with the last % call to QueueAuthenticPixels() or GetVirtualPixels(). % % o image: the image. 
% */ MagickExport const IndexPacket *AcquireIndexes(const Image *image) { return(GetVirtualIndexQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMemory() returns a pointer to a block of memory at least size bytes % suitably aligned for any use. % % The format of the AcquireMemory method is: % % void *AcquireMemory(const size_t size) % % A description of each parameter follows: % % o size: the size of the memory in bytes to allocate. % */ MagickExport void *AcquireMemory(const size_t size) { void *allocation; assert(size != 0); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); allocation=malloc(size); return(allocation); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e C a c h e V i e w P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneCacheViewPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead. % % Deprecated, replace with: % % GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception); % % The format of the AcquireOneCacheViewPixel method is: % % MagickBooleanType AcquireOneCacheViewPixel(const CacheView *cache_view, % const ssize_t x,const ssize_t y,PixelPacket *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y: These values define the offset of the pixel. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType AcquireOneCacheViewPixel( const CacheView *cache_view,const ssize_t x,const ssize_t y, PixelPacket *pixel,ExceptionInfo *exception) { return(GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e C a c h e V i e w V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneCacheViewVirtualPixel() returns a single pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead. % % Deprecated, replace with: % % GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method, % x,y,pixel,exception); % % The format of the AcquireOneCacheViewPixel method is: % % MagickBooleanType AcquireOneCacheViewVirtualPixel( % const CacheView *cache_view, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_view: the cache view. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the offset of the pixel. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType AcquireOneCacheViewVirtualPixel( const CacheView *cache_view,const VirtualPixelMethod virtual_pixel_method, const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) { MagickBooleanType status; status=GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method, x,y,pixel,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e M a g i c k P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneMagickPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOnePixel() instead. % % Deprecated, replace with: % % MagickPixelPacket pixel; % GetOneVirtualMagickPixel(image,x,y,&pixel,exception); % % The format of the AcquireOneMagickPixel() method is: % % MagickPixelPacket AcquireOneMagickPixel(const Image image,const ssize_t x, % const ssize_t y,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickPixelPacket AcquireOneMagickPixel(const Image *image, const ssize_t x,const ssize_t y,ExceptionInfo *exception) { MagickPixelPacket pixel; (void) GetOneVirtualMagickPixel(image,x,y,&pixel,exception); return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOnePixel() returns a single pixel at the specified (x,y) location. % The image background color is returned if an error occurs. If you plan to % modify the pixel, use GetOnePixel() instead. 
% % Deprecated, replace with: % % PixelPacket pixel; % GetOneVirtualPixel(image,x,y,&pixel,exception); % % The format of the AcquireOnePixel() method is: % % PixelPacket AcquireOnePixel(const Image image,const ssize_t x, % const ssize_t y,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o exception: return any errors or warnings in this structure. % */ MagickExport PixelPacket AcquireOnePixel(const Image *image,const ssize_t x, const ssize_t y,ExceptionInfo *exception) { PixelPacket pixel; (void) GetOneVirtualPixel(image,x,y,&pixel,exception); return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneVirtualPixel() returns a single pixel at the specified (x,y) % location as defined by specified pixel method. The image background color % is returned if an error occurs. If you plan to modify the pixel, use % GetOnePixel() instead. % % Deprecated, replace with: % % PixelPacket pixel; % GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel,exception); % % The format of the AcquireOneVirtualPixel() method is: % % PixelPacket AcquireOneVirtualPixel(const Image image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,ExceptionInfo exception) % % A description of each parameter follows: % % o virtual_pixel_method: the virtual pixel method. % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport PixelPacket AcquireOneVirtualPixel(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, ExceptionInfo *exception) { PixelPacket pixel; (void) GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel, exception); return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixels() returns the pixels associated with the last call to % QueueAuthenticPixels() or GetVirtualPixels(). % % Deprecated, replace with: % % GetVirtualPixelQueue(image); % % The format of the AcquirePixels() method is: % % const PixelPacket *AcquirePixels(const Image image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const PixelPacket *AcquirePixels(const Image *image) { return(GetVirtualPixelQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e S e m a p h o r e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireSemaphoreInfo() acquires a semaphore. % % The format of the AcquireSemaphoreInfo method is: % % void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info) % % A description of each parameter follows: % % o semaphore_info: Specifies a pointer to an SemaphoreInfo structure. 
% */ MagickExport void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info) { assert(semaphore_info != (SemaphoreInfo **) NULL); if (*semaphore_info == (SemaphoreInfo *) NULL) { InitializeMagickMutex(); LockMagickMutex(); if (*semaphore_info == (SemaphoreInfo *) NULL) *semaphore_info=AllocateSemaphoreInfo(); UnlockMagickMutex(); } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A f f i n i t y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AffinityImage() replaces the colors of an image with the closest color from % a reference image. % % Deprecated, replace with: % % RemapImage(quantize_info,image,affinity_image); % % The format of the AffinityImage method is: % % MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info, % Image *image,const Image *affinity_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o affinity_image: the reference image. % */ MagickExport MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info, Image *image,const Image *affinity_image) { return(RemapImage(quantize_info,image,affinity_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A f f i n i t y I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AffinityImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % Deprecated, replace with: % % RemapImages(quantize_info,images,affinity_image); % % The format of the AffinityImage method is: % % MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info, % Image *images,Image *affinity_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. 
% % o affinity_image: the reference image. % */ MagickExport MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info, Image *images,const Image *affinity_image) { return(RemapImages(quantize_info,images,affinity_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateImage() returns a pointer to an image structure initialized to % default values. % % Deprecated, replace with: % % AcquireImage(image_info); % % The format of the AllocateImage method is: % % Image *AllocateImage(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % */ MagickExport Image *AllocateImage(const ImageInfo *image_info) { return(AcquireImage(image_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateImageColormap() allocates an image colormap and initializes % it to a linear gray colorspace. If the image already has a colormap, % it is replaced. AllocateImageColormap() returns MagickTrue if successful, % otherwise MagickFalse if there is not enough memory. % % Deprecated, replace with: % % AcquireImageColormap(image,colors); % % The format of the AllocateImageColormap method is: % % MagickBooleanType AllocateImageColormap(Image *image, % const size_t colors) % % A description of each parameter follows: % % o image: the image. % % o colors: the number of colors in the image colormap. 
% */ MagickExport MagickBooleanType AllocateImageColormap(Image *image, const size_t colors) { return(AcquireImageColormap(image,colors)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % Deprecated, replace with: % % AcquireNextImage(image_info,image); % % The format of the AllocateNextImage method is: % % void AllocateNextImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % */ MagickExport void AllocateNextImage(const ImageInfo *image_info,Image *image) { AcquireNextImage(image_info,image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e S t r i n g % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateString() allocates memory for a string and copies the source string % to that memory location (and returns it). % % The format of the AllocateString method is: % % char *AllocateString(const char *source) % % A description of each parameter follows: % % o source: A character string. 
% */ MagickExport char *AllocateString(const char *source) { char *destination; size_t length; assert(source != (const char *) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); length=strlen(source)+MaxTextExtent+1; destination=(char *) AcquireQuantumMemory(length,sizeof(*destination)); if (destination == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *destination='\0'; (void) CopyMagickString(destination,source,length); return(destination); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AverageImages() takes a set of images and averages them together. Each % image in the set must have the same width and height. AverageImages() % returns a single image with each corresponding pixel component of each % image averaged. On failure, a NULL image is returned and exception % describes the reason for the failure. % % Deprecated, replace with: % % EvaluateImages(images,MeanEvaluateOperator,exception); % % The format of the AverageImages method is: % % Image *AverageImages(Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AverageImages(const Image *images,ExceptionInfo *exception) { return(EvaluateImages(images,MeanEvaluateOperator,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Extract a channel from the image. A channel is a particular color component % of each pixel in the image. 
% % Deprecated, replace with: % % SeparateImageChannel(image,channel); % % The format of the ChannelImage method is: % % unsigned int ChannelImage(Image *image,const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, YellowChannel, % or BlackChannel. % */ MagickExport unsigned int ChannelImage(Image *image,const ChannelType channel) { return(SeparateImageChannel(image,channel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChannelThresholdImage() changes the value of individual pixels based on % the intensity of each pixel channel. The result is a high-contrast image. % % The format of the ChannelThresholdImage method is: % % unsigned int ChannelThresholdImage(Image *image,const char *level) % % A description of each parameter follows: % % o image: the image. % % o level: define the threshold values. 
%
*/
MagickExport unsigned int ChannelThresholdImage(Image *image,const char *level)
{
  /*
    Deprecated.  Parse up to three threshold values from `level' (rho maps to
    red, sigma to green, xi to blue; missing values fall back to the red
    threshold) and hard-threshold each RGB channel independently.  The result
    is the bitwise-AND of the per-channel BilevelImageChannel() statuses, so
    it is non-zero only if every channel succeeded.
  */
  MagickPixelPacket
    threshold;

  GeometryInfo
    geometry_info;

  unsigned int
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* NOTE(review): the deprecation notice is only logged when image->debug is
     set, unlike other shims in this file which log unconditionally. */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;  /* no green value given: reuse red */
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;  /* no blue value given: reuse red */
  status=BilevelImageChannel(image,RedChannel,threshold.red);
  status&=BilevelImageChannel(image,GreenChannel,threshold.green);
  status&=BilevelImageChannel(image,BlueChannel,threshold.blue);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l i p   I m a g e   P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipPathImage() sets the image clip mask based any clipping path information
%  if it exists.
%
%  Deprecated, replace with:
%
%    ClipImagePath(image,pathname,inside);
%
%  The format of the ClipImage method is:
%
%      MagickBooleanType ClipPathImage(Image *image,const char *pathname,
%        const MagickBooleanType inside)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o pathname: name of clipping path resource. If name is preceded by #, use
%      clipping path numbered by name.
%
%    o inside: if non-zero, later operations take effect inside clipping path.
%      Otherwise later operations take effect outside clipping path.
% */ MagickExport MagickBooleanType ClipPathImage(Image *image,const char *pathname, const MagickBooleanType inside) { return(ClipImagePath(image,pathname,inside)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e A t t r i b u t e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageAttributes() clones one or more image attributes. % % Deprecated, replace with: % % CloneImageProperties(image,clone_image); % % The format of the CloneImageAttributes method is: % % MagickBooleanType CloneImageAttributes(Image *image, % const Image *clone_image) % % A description of each parameter follows: % % o image: the image. % % o clone_image: the clone image. % */ MagickExport MagickBooleanType CloneImageAttributes(Image *image, const Image *clone_image) { return(CloneImageProperties(image,clone_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneMemory() copies size bytes from memory area source to the destination. % Copying between objects that overlap will take place correctly. It returns % destination. % % The format of the CloneMemory method is: % % void *CloneMemory(void *destination,const void *source, % const size_t size) % % A description of each parameter follows: % % o destination: the destination. % % o source: the source. % % o size: the size of the memory in bytes to allocate. 
% */ MagickExport void *CloneMemory(void *destination,const void *source, const size_t size) { register const unsigned char *p; register unsigned char *q; register ssize_t i; assert(destination != (void *) NULL); assert(source != (const void *) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); p=(const unsigned char *) source; q=(unsigned char *) destination; if ((p <= q) || ((p+size) >= q)) return(memcpy(destination,source,size)); /* Overlap, copy backwards. */ p+=size; q+=size; for (i=(ssize_t) (size-1); i >= 0; i--) *--q=(*--p); return(destination); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o s e C a c h e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloseCacheView() closes the specified view returned by a previous call to % OpenCacheView(). % % Deprecated, replace with: % % DestroyCacheView(view_info); % % The format of the CloseCacheView method is: % % CacheView *CloseCacheView(CacheView *view_info) % % A description of each parameter follows: % % o view_info: the address of a structure of type CacheView. % */ MagickExport CacheView *CloseCacheView(CacheView *view_info) { return(DestroyCacheView(view_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorFloodfill() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. 
The
% fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same.  For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now
% interpreted as the same color for the purposes of the floodfill.
%
%  The format of the ColorFloodfillImage method is:
%
%      MagickBooleanType ColorFloodfillImage(Image *image,
%        const DrawInfo *draw_info,const PixelPacket target,
%        const ssize_t x_offset,const ssize_t y_offset,const PaintMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o target: the RGB value of the target color.
%
%    o x,y: the starting location of the operation.
%
%    o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/

/*
  Maximum number of pending scanline segments; exceeding it throws DrawError.
*/
#define MaxStacksize  (1UL << 15)
/*
  Push the span [left,right] of row `up', to be continued in direction
  `delta' (+1 = row below, -1 = row above), onto the segment stack; spans
  whose continuation row falls outside the image are silently dropped.
  NOTE(review): the macro relies on `s', `segment_stack' and `image' being
  in scope at the expansion site.
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryImageException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

MagickExport MagickBooleanType ColorFloodfillImage(Image *image,
  const DrawInfo *draw_info,const PixelPacket target,const ssize_t x_offset,
  const ssize_t y_offset,const PaintMethod method)
{
  Image
    *floodplane_image;  /* scratch clone; transparent opacity marks "filled" */

  MagickBooleanType
    skip;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;  /* one past the top of the segment stack */

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  /*
    Allocate the segment stack.
  */
  segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
    sizeof(*segment_stack));
  if (segment_stack == (SegmentInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  /*
    Scanline seed fill (cf. Heckbert's algorithm): each stack entry holds a
    parent span (x1..x2 on row y1) and the direction (y2) of the child row
    to examine.  Matching pixels are marked by setting their opacity in
    floodplane_image to TransparentOpacity.
  */
  while (s > segment_stack)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels.  First scan left from x1: FloodfillMethod
      stops when the pixel no longer matches `target'; FillToBorderMethod
      stops when the pixel matches `target' (the border color).
    */
    p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
    q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
      &image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      if (q->opacity == (Quantum) TransparentOpacity)
        break;  /* already visited */
      if (method == FloodfillMethod)
        {
          if (IsColorSimilar(image,p,&target) == MagickFalse)
            break;
        }
      else
        if (IsColorSimilar(image,p,&target) != MagickFalse)
          break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
      break;
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);  /* leaked left of parent */
        x=x1+1;
      }
    /*
      Scan right across the row, pushing child spans as runs of matching
      pixels are completed.
    */
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              p=GetVirtualPixels(image,x,y,image->columns-x,1,
                &image->exception);
              q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
                &image->exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                if (method == FloodfillMethod)
                  {
                    if (IsColorSimilar(image,p,&target) == MagickFalse)
                      break;
                  }
                else
                  if (IsColorSimilar(image,p,&target) != MagickFalse)
                    break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);  /* leaked right of parent */
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Advance past any non-matching gap within the parent span.
          */
          p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            if (method == FloodfillMethod)
              {
                if (IsColorSimilar(image,p,&target) != MagickFalse)
                  break;
              }
            else
              if (IsColorSimilar(image,p,&target) == MagickFalse)
                break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Tile fill color onto floodplane: composite the fill over every pixel
      the fill pass marked (non-opaque opacity in floodplane_image).
    */
    p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
      &image->exception);
    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          (void) GetFillColor(draw_info,x,y,&fill_color);
          MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
            (MagickRealType) q->opacity,q);
        }
      p++;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
  segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
  floodplane_image=DestroyImage(floodplane_image);
  /* Success only if the tiling loop completed every row. */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s t i t u t e   C o m p o n e n t   G e n e s i s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConstituteComponentGenesis() instantiates the constitute component.
%
%  The format of the ConstituteComponentGenesis method is:
%
%      MagickBooleanType ConstituteComponentGenesis(void)
%
*/
MagickExport MagickBooleanType ConstituteComponentGenesis(void)
{
  /* No per-process state to initialize; always succeeds. */
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s t i t u t e   C o m p o n e n t   T e r m i n u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConstituteComponentTerminus() destroys the constitute component.
% % The format of the ConstituteComponentTerminus method is: % % ConstituteComponentTerminus(void) % */ MagickExport void ConstituteComponentTerminus(void) { } #if defined(MAGICKCORE_WINDOWS_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o H B i t m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImageToHBITMAP() extracts a specified region of the image and returns % it as a Windows HBITMAP. While the same functionality can be accomplished by % invoking CropImage() followed by ImageToHBITMAP(), this method is more % efficient since it copies pixels directly to the HBITMAP. % % The format of the CropImageToHBITMAP method is: % % HBITMAP CropImageToHBITMAP(Image* image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *CropImageToHBITMAP(Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define CropImageTag "Crop/Image" BITMAP bitmap; HBITMAP bitmapH; HANDLE bitmap_bitsH; MagickBooleanType proceed; RectangleInfo page; register const PixelPacket *p; register RGBQUAD *q; RGBQUAD *bitmap_bits; ssize_t y; /* Check crop geometry. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (((geometry->x+(ssize_t) geometry->width) < 0) || ((geometry->y+(ssize_t) geometry->height) < 0) || (geometry->x >= (ssize_t) image->columns) || (geometry->y >= (ssize_t) image->rows)) ThrowImageException(OptionError,"GeometryDoesNotContainImage"); page=(*geometry); if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if (page.x < 0) { page.width+=page.x; page.x=0; } if (page.y < 0) { page.height+=page.y; page.y=0; } if ((page.width == 0) || (page.height == 0)) ThrowImageException(OptionError,"GeometryDimensionsAreZero"); /* Initialize crop image attributes. */ bitmap.bmType = 0; bitmap.bmWidth = (LONG) page.width; bitmap.bmHeight = (LONG) page.height; bitmap.bmWidthBytes = bitmap.bmWidth * 4; bitmap.bmPlanes = 1; bitmap.bmBitsPixel = 32; bitmap.bmBits = NULL; bitmap_bitsH=(HANDLE) GlobalAlloc(GMEM_MOVEABLE | GMEM_DDESHARE,page.width* page.height*bitmap.bmBitsPixel); if (bitmap_bitsH == NULL) return(NULL); bitmap_bits=(RGBQUAD *) GlobalLock((HGLOBAL) bitmap_bitsH); if ( bitmap.bmBits == NULL ) bitmap.bmBits = bitmap_bits; if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) SetImageColorspace(image,sRGBColorspace); /* Extract crop image. 
*/ q=bitmap_bits; for (y=0; y < (ssize_t) page.height; y++) { register ssize_t x; p=GetVirtualPixels(image,page.x,page.y+y,page.width,1,exception); if (p == (const PixelPacket *) NULL) break; /* Transfer pixels, scaling to Quantum */ for( x=(ssize_t) page.width ; x> 0 ; x-- ) { q->rgbRed = ScaleQuantumToChar(GetPixelRed(p)); q->rgbGreen = ScaleQuantumToChar(GetPixelGreen(p)); q->rgbBlue = ScaleQuantumToChar(GetPixelBlue(p)); q->rgbReserved = 0; p++; q++; } proceed=SetImageProgress(image,CropImageTag,y,page.height); if (proceed == MagickFalse) break; } if (y < (ssize_t) page.height) { GlobalUnlock((HGLOBAL) bitmap_bitsH); GlobalFree((HGLOBAL) bitmap_bitsH); return((void *) NULL); } bitmap.bmBits=bitmap_bits; bitmapH=CreateBitmapIndirect(&bitmap); GlobalUnlock((HGLOBAL) bitmap_bitsH); GlobalFree((HGLOBAL) bitmap_bitsH); return((void *) bitmapH); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageAttribute() deletes an attribute from the image. % % Deprecated, replace with: % % DeleteImageProperty(image,key); % % The format of the DeleteImageAttribute method is: % % MagickBooleanType DeleteImageAttribute(Image *image,const char *key) % % A description of each parameter follows: % % o image: the image info. % % o key: the image key. % */ MagickExport MagickBooleanType DeleteImageAttribute(Image *image, const char *key) { return(DeleteImageProperty(image,key)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageList() deletes an image at the specified position in the list. 
% % The format of the DeleteImageList method is: % % unsigned int DeleteImageList(Image *images,const ssize_t offset) % % A description of each parameter follows: % % o images: the image list. % % o offset: the position within the list. % */ MagickExport unsigned int DeleteImageList(Image *images,const ssize_t offset) { register ssize_t i; if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); while (GetPreviousImageInList(images) != (Image *) NULL) images=GetPreviousImageInList(images); for (i=0; i < offset; i++) { if (GetNextImageInList(images) == (Image *) NULL) return(MagickFalse); images=GetNextImageInList(images); } DeleteImageFromList(&images); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteMagickRegistry() deletes an entry in the registry as defined by the id. % It returns MagickTrue if the entry is deleted otherwise MagickFalse if no % entry is found in the registry that matches the id. % % Deprecated, replace with: % % char key[MaxTextExtent]; % FormatLocaleString(key,MaxTextExtent,"%ld\n",id); % DeleteImageRegistry(key); % % The format of the DeleteMagickRegistry method is: % % MagickBooleanType DeleteMagickRegistry(const ssize_t id) % % A description of each parameter follows: % % o id: the registry id. % */ MagickExport MagickBooleanType DeleteMagickRegistry(const ssize_t id) { char key[MaxTextExtent]; (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id); return(DeleteImageRegistry(key)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y C o n s t i t u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyConstitute() destroys the constitute component. 
%
%  The format of the DestroyConstitute method is:
%
%      DestroyConstitute(void)
%
*/
MagickExport void DestroyConstitute(void)
{
  /* No component state to release. */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y M a g i c k R e g i s t r y                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyMagickRegistry() deallocates memory associated the magick registry.
%
%  Deprecated, replace with:
%
%    RegistryComponentTerminus();
%
%  The format of the DestroyMagickRegistry method is:
%
%       void DestroyMagickRegistry(void)
%
*/
MagickExport void DestroyMagickRegistry(void)
{
  /* Deprecated thin wrapper over RegistryComponentTerminus(). */
  RegistryComponentTerminus();
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s c r i b e I m a g e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DescribeImage() describes an image by printing its attributes to the file.
%  Attributes include the image width, height, size, and others.
%
%  Deprecated, replace with:
%
%    IdentifyImage(image,file,verbose);
%
%  The format of the DescribeImage method is:
%
%      MagickBooleanType DescribeImage(Image *image,FILE *file,
%        const MagickBooleanType verbose)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o file: the file, typically stdout.
%
%    o verbose: A value other than zero prints more detailed information
%      about the image.
%
*/
MagickExport MagickBooleanType DescribeImage(Image *image,FILE *file,
  const MagickBooleanType verbose)
{
  /* Deprecated thin wrapper over IdentifyImage(). */
  return(IdentifyImage(image,file,verbose));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y I m a g e A t t r i b u t e s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageAttributes() deallocates memory associated with the image
%  attribute list.
%
%  The format of the DestroyImageAttributes method is:
%
%      DestroyImageAttributes(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageAttributes(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Attributes are stored in a splay tree; destroying it releases every
    key/value pair.  Idempotent: a NULL attribute list is left alone.
  */
  if (image->attributes != (void *) NULL)
    image->attributes=(void *) DestroySplayTree((SplayTreeInfo *)
      image->attributes);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y I m a g e s                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImages() destroys an image list.
%
%  Deprecated, replace with:
%
%    DestroyImageList(image);
%
%  The format of the DestroyImages method is:
%
%      void DestroyImages(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image sequence.
%
*/
MagickExport void DestroyImages(Image *image)
{
  /* Deprecated thin wrapper over DestroyImageList(); NULL is a no-op. */
  if (image == (Image *) NULL)
    return;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
  image=DestroyImageList(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y M a g i c k                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyMagick() destroys the ImageMagick environment.
%
%  Deprecated, replace with:
%
%    MagickCoreTerminus();
%
%  The format of the DestroyMagick function is:
%
%      DestroyMagick(void)
%
*/
MagickExport void DestroyMagick(void)
{
  /* Deprecated thin wrapper over MagickCoreTerminus(). */
  MagickCoreTerminus();
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D i s p a t c h I m a g e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DispatchImage() extracts pixel data from an image and returns it to you.
%  The method returns MagickFalse on success otherwise MagickTrue if an
%  error is encountered.  The data is returned as char, short int, int,
%  ssize_t, float, or double in the order specified by map.
%
%  Suppose you want to extract the first scanline of a 640x480 image as
%  character data in red-green-blue order:
%
%    DispatchImage(image,0,0,640,1,"RGB",CharPixel,pixels,exception);
%
%  Deprecated, replace with:
%
%    ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels,
%      exception);
%
%  The format of the DispatchImage method is:
%
%      unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,const size_t columns,
%        const size_t rows,const char *map,const StorageType type,
%        void *pixels,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset, y_offset, columns, rows:  These values define the perimeter
%      of a region of pixels you want to extract.
%
%    o map:  This string reflects the expected ordering of the pixel array.
%      It can be any combination or order of R = red, G = green, B = blue,
%      A = alpha, C = cyan, Y = yellow, M = magenta, K = black, or
%      I = intensity (for grayscale).
%
%    o type: Define the data type of the pixels.  Float and double types are
%      normalized to [0..1] otherwise [0..QuantumRange].  Choose from these
%      types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, or
%      DoublePixel.
%
%    o pixels: This array of values contain the pixel components as defined by
%      map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const char *map,const StorageType type,void *pixels,ExceptionInfo *exception)
{
  unsigned int
    status;

  /*
    Deprecated thin wrapper: forward to ExportImagePixels().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  status=ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels,
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   E x t r a c t S u b i m a g e F r o m I m a g e                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtractSubimageFromImageImage() extracts a region of the image that most
%  closely resembles the reference.
%
%  The format of the ExtractSubimageFromImageImage method is:
%
%      Image *ExtractSubimageFromImage(const Image *image,
%        const Image *reference,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return the normalized RMS pixel distance between `reference' and the
  like-sized window of `image' at (x_offset,y_offset).  The scan bails out
  early once the running distance exceeds similarity_threshold.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const ssize_t x_offset,const ssize_t y_offset,
  const double similarity_threshold,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reference_view;

  double
    channels,
    normalized_similarity,
    similarity;

  ssize_t
    y;

  /*
    Compute the similarity in pixels between two images.
  */
  normalized_similarity=1.0;
  similarity=0.0;
  /*
    Red, green and blue always participate; opacity and the CMYK index
    channel contribute only when both images carry them.
  */
  channels=3;
  if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
    channels++;
  if ((image->colorspace == CMYKColorspace) &&
      (reference->colorspace == CMYKColorspace))
    channels++;
  image_view=AcquireVirtualCacheView(image,exception);
  reference_view=AcquireVirtualCacheView(reference,exception);
  for (y=0; y < (ssize_t) reference->rows; y++)
  {
    register const IndexPacket
      *indexes,
      *reference_indexes;

    register const PixelPacket
      *p,
      *q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset+y,
      reference->columns,1,exception);
    q=GetCacheViewVirtualPixels(reference_view,0,y,reference->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reference_indexes=GetCacheViewVirtualIndexQueue(reference_view);
    for (x=0; x < (ssize_t) reference->columns; x++)
    {
      MagickRealType
        pixel;

      /*
        Accumulate squared per-channel differences, scaled to [0,1].
      */
      pixel=QuantumScale*(GetPixelRed(p)-(double) GetPixelRed(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelGreen(p)-(double) GetPixelGreen(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelBlue(p)-(double) GetPixelBlue(q));
      similarity+=pixel*pixel;
      if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
        {
          pixel=QuantumScale*(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          similarity+=pixel*pixel;
        }
      if ((image->colorspace == CMYKColorspace) &&
          (reference->colorspace == CMYKColorspace))
        {
          pixel=QuantumScale*(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reference_indexes+x));
          similarity+=pixel*pixel;
        }
      p++;
      q++;
    }
    /*
      Normalize after every row so the scan can stop as soon as the window
      is already worse than the best match found so far.
    */
    normalized_similarity=sqrt(similarity)/reference->columns/reference->rows/
      channels;
    if (normalized_similarity > similarity_threshold)
      break;
  }
  reference_view=DestroyCacheView(reference_view);
  image_view=DestroyCacheView(image_view);
  return(normalized_similarity);
}

MagickExport Image *ExtractSubimageFromImage(Image *image,
  const Image *reference,ExceptionInfo *exception)
{
  double
    similarity_threshold;

  RectangleInfo
    offset;

  ssize_t
    y;

  /*
    Extract reference from image.
  */
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    return((Image *) NULL);
  similarity_threshold=(double) image->columns*image->rows;
  SetGeometry(reference,&offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    for (x=0; x < (ssize_t) (image->columns-reference->columns); x++)
    {
      /*
        NOTE(review): similarity_threshold is read here without
        synchronization but only written inside the critical section below;
        a stale value just makes the early-exit bound looser, so the final
        minimum is still recorded correctly -- confirm this is intended.
      */
      similarity=GetSimilarityMetric(image,reference,x,y,similarity_threshold,
        exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_ExtractSubimageFromImage)
#endif
      if (similarity < similarity_threshold)
        {
          similarity_threshold=similarity;
          offset.x=x;
          offset.y=y;
        }
    }
  }
  /*
    Reject the best match when it is still worse than the reference fuzz
    tolerance.
  */
  if (similarity_threshold > (QuantumScale*reference->fuzz/100.0))
    return((Image *) NULL);
  return(CropImage(image,&offset,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   F l a t t e n I m a g e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlattenImages() Obsolete Function: Use MergeImageLayers() instead.
%
%  Deprecated, replace with:
%
%    MergeImageLayers(image,FlattenLayer,exception);
%
%  The format of the FlattenImage method is:
%
%      Image *FlattenImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlattenImages(Image *image,ExceptionInfo *exception)
{
  /* Deprecated thin wrapper over MergeImageLayers(). */
  return(MergeImageLayers(image,FlattenLayer,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   F o r m a t I m a g e A t t r i b u t e                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FormatImageAttribute() permits formatted key/value pairs to be saved as an
%  image attribute.
%
%  The format of the FormatImageAttribute method is:
%
%      MagickBooleanType FormatImageAttribute(Image *image,const char *key,
%        const char *format,...)
%
%  A description of each parameter follows.
%
%   o  image:  The image.
%
%   o  key:  The attribute key.
%
%   o  format:  A string describing the format to use to write the remaining
%      arguments.
%
*/
MagickExport MagickBooleanType FormatImageAttributeList(Image *image,
  const char *key,const char *format,va_list operands)
{
  char
    value[MaxTextExtent];

  int
    n;

  /*
    Render the operands into a bounded buffer, then store as a property.
    With vsnprintf the buffer is always NUL-terminated; the n < 0 guard
    covers an encoding error (and the unbounded vsprintf fallback).
  */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(value,MaxTextExtent,format,operands);
#else
  n=vsprintf(value,format,operands);
#endif
  if (n < 0)
    value[MaxTextExtent-1]='\0';
  return(SetImageProperty(image,key,value));
}

MagickExport MagickBooleanType FormatImagePropertyList(Image *image,
  const char *property,const char *format,va_list operands)
{
  char
    value[MaxTextExtent];

  int
    n;

  /*
    Identical to FormatImageAttributeList() but named for the property API.
  */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(value,MaxTextExtent,format,operands);
#else
  n=vsprintf(value,format,operands);
#endif
  if (n < 0)
    value[MaxTextExtent-1]='\0';
  return(SetImageProperty(image,property,value));
}

MagickExport MagickBooleanType FormatImageAttribute(Image *image,
  const char *key,const char *format,...)
{
  char
    value[MaxTextExtent];

  int
    n;

  va_list
    operands;

  /*
    Variadic front end: format the arguments, then store the key/value pair
    as an image property.
  */
  va_start(operands,format);
  n=FormatLocaleStringList(value,MaxTextExtent,format,operands);
  (void) n;
  va_end(operands);
  return(SetImageProperty(image,key,value));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   F o r m a t M a g i c k S t r i n g                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FormatMagickString() prints formatted output of a variable argument list.
%
%  The format of the FormatMagickString method is:
%
%      ssize_t FormatMagickString(char *string,const size_t length,
%        const char *format,...)
%
%  A description of each parameter follows.
%
%   o string:  FormatMagickString() returns the formatted string in this
%     character buffer.
%
%   o length: the maximum length of the string.
%
%   o format:  A string describing the format to use to write the remaining
%     arguments.
%
*/
MagickExport ssize_t FormatMagickStringList(char *string,const size_t length,
  const char *format,va_list operands)
{
  int
    n;

  /*
    Bounded formatted print; returns the number of characters that would
    have been written (snprintf semantics).  NOTE(review): assumes
    length > 0 -- a zero length would index string[-1] on error.
  */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(string,length,format,operands);
#else
  n=vsprintf(string,format,operands);
#endif
  if (n < 0)
    string[length-1]='\0';
  return((ssize_t) n);
}

MagickExport ssize_t FormatMagickString(char *string,const size_t length,
  const char *format,...)
{
  ssize_t
    n;

  va_list
    operands;

  /*
    Variadic front end for FormatMagickStringList().
  */
  va_start(operands,format);
  n=(ssize_t) FormatMagickStringList(string,length,format,operands);
  va_end(operands);
  return(n);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   F o r m a t S t r i n g                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FormatString() prints formatted output of a variable argument list.
%
%  The format of the FormatString method is:
%
%      void FormatString(char *string,const char *format,...)
%
%  A description of each parameter follows.
%
%   o string:  Method FormatString returns the formatted string in this
%     character buffer.
%
%   o format:  A string describing the format to use to write the remaining
%     arguments.
%
*/
MagickExport void FormatStringList(char *string,const char *format,
  va_list operands)
{
  int
    n;

  /*
    Deprecated: formats into `string', which is assumed to hold at least
    MaxTextExtent bytes -- the caller cannot pass a smaller buffer safely.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(string,MaxTextExtent,format,operands);
#else
  n=vsprintf(string,format,operands);
#endif
  if (n < 0)
    string[MaxTextExtent-1]='\0';
}

MagickExport void FormatString(char *string,const char *format,...)
{
  va_list
    operands;

  /*
    Variadic front end; same MaxTextExtent buffer-size assumption.
  */
  va_start(operands,format);
  (void) FormatLocaleStringList(string,MaxTextExtent,format,operands);
  va_end(operands);
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   F u z z y C o l o r M a t c h                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FuzzyColorMatch() returns true if two pixels are identical in color.
%
%  The format of the ColorMatch method is:
%
%      void FuzzyColorMatch(const PixelPacket *p,const PixelPacket *q,
%        const double fuzz)
%
%  A description of each parameter follows:
%
%    o p: Pixel p.
%
%    o q: Pixel q.
%
%    o distance:  Define how much tolerance is acceptable to consider
%      two colors as the same.
%
*/
MagickExport unsigned int FuzzyColorMatch(const PixelPacket *p,
  const PixelPacket *q,const double fuzz)
{
  register MagickRealType
    delta,
    distance,
    threshold;

  /*
    Exact comparison short-circuits the distance computation when no
    tolerance was requested.
  */
  if ((fuzz == 0.0) && (GetPixelRed(p) == GetPixelRed(q)) &&
      (GetPixelGreen(p) == GetPixelGreen(q)) &&
      (GetPixelBlue(p) == GetPixelBlue(q)))
    return(MagickTrue);
  /*
    Accumulate squared channel distances, bailing out as soon as the
    running sum exceeds fuzz squared.
  */
  threshold=fuzz*fuzz;
  delta=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
  distance=delta*delta;
  if (distance > threshold)
    return(MagickFalse);
  delta=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
  distance+=delta*delta;
  if (distance > threshold)
    return(MagickFalse);
  delta=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
  distance+=delta*delta;
  if (distance > threshold)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   F u z z y C o l o r C o m p a r e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FuzzyColorCompare() returns MagickTrue if the distance between two colors
%  is less than the specified distance in a linear three dimensional color
%  space.  This method is used by ColorFloodFill() and other algorithms which
%  compare two colors.
%
%  The format of the FuzzyColorCompare method is:
%
%      void FuzzyColorCompare(const Image *image,const PixelPacket *p,
%        const PixelPacket *q)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o p: Pixel p.
%
%    o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyColorCompare(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  /* Deprecated thin wrapper over IsColorSimilar(). */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
  return(IsColorSimilar(image,p,q));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   F u z z y O p a c i t y C o m p a r e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FuzzyOpacityCompare() returns true if the distance between two opacity
%  values is less than the specified distance in a linear color space.  This
%  method is used by MatteFloodFill() and other algorithms which compare
%  two opacity values.
%
%  Deprecated, replace with:
%
%    IsOpacitySimilar(image,p,q);
%
%  The format of the FuzzyOpacityCompare method is:
%
%      void FuzzyOpacityCompare(const Image *image,const PixelPacket *p,
%        const PixelPacket *q)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o p: Pixel p.
%
%    o q: Pixel q.
%
*/
MagickExport MagickBooleanType FuzzyOpacityCompare(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  /* Deprecated thin wrapper over IsOpacitySimilar(). */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5");
  return(IsOpacitySimilar(image,p,q));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t C o n f i g u r e B l o b                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetConfigureBlob() returns the specified configure file as a blob.
%
%  The format of the GetConfigureBlob method is:
%
%      void *GetConfigureBlob(const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o filename: the configure file name.
%
%    o path: return the full path information of the configure file.
%
%    o length: This pointer to a size_t integer sets the initial length of the
%      blob.  On return, it reflects the actual length of the blob.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetConfigureBlob(const char *filename,char *path,
  size_t *length,ExceptionInfo *exception)
{
  void
    *blob;

  assert(filename != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(path != (char *) NULL);
  assert(length != (size_t *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  blob=(void *) NULL;
  /*
    `path' is both an output parameter and the working buffer: each
    candidate location is formatted into it, so on success it names the
    file that was actually read.
  */
  (void) CopyMagickString(path,filename,MaxTextExtent);
#if defined(MAGICKCORE_INSTALLED_SUPPORT)
#if defined(MAGICKCORE_LIBRARY_PATH)
  if (blob == (void *) NULL)
    {
      /*
        Search hard coded paths.
      */
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s",
        MAGICKCORE_LIBRARY_PATH,filename);
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
#endif
#if defined(MAGICKCORE_WINDOWS_SUPPORT) && !(defined(MAGICKCORE_CONFIGURE_PATH) || defined(MAGICKCORE_SHARE_PATH))
  if (blob == (void *) NULL)
    {
      unsigned char
        *key_value;

      /*
        Locate file via registry key.
      */
      key_value=NTRegistryKeyLookup("ConfigurePath");
      if (key_value != (unsigned char *) NULL)
        {
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",(char *)
            key_value,DirectorySeparator,filename);
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
        }
    }
#endif
#else
  /*
    Uninstalled build: probe MAGICK_HOME, the user's home directory, the
    client executable's directory, and finally the current directory.
  */
  if (blob == (void *) NULL)
    {
      char
        *home;

      home=GetEnvironmentValue("MAGICK_HOME");
      if (home != (char *) NULL)
        {
          /*
            Search MAGICK_HOME.
          */
#if !defined(MAGICKCORE_POSIX_SUPPORT)
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",home,
            DirectorySeparator,filename);
#else
          (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",home,
            MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
      home=GetEnvironmentValue("HOME");
      if (home == (char *) NULL)
        home=GetEnvironmentValue("USERPROFILE");
      if (home != (char *) NULL)
        {
          /*
            Search $HOME/.magick.
          */
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s.magick%s%s",home,
            DirectorySeparator,DirectorySeparator,filename);
          if ((IsPathAccessible(path) != MagickFalse) &&
              (blob == (void *) NULL))
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
    }
  if ((blob == (void *) NULL) && (*GetClientPath() != '\0'))
    {
#if !defined(MAGICKCORE_POSIX_SUPPORT)
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",GetClientPath(),
        DirectorySeparator,filename);
#else
      char
        prefix[MaxTextExtent];

      /*
        Search based on executable directory if directory is known.
      */
      (void) CopyMagickString(prefix,GetClientPath(),MaxTextExtent);
      ChopPathComponents(prefix,1);
      (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",prefix,
        MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
  /*
    Search current directory.
  */
  if ((blob == (void *) NULL) && (IsPathAccessible(path) != MagickFalse))
    blob=FileToBlob(path,~0UL,length,exception);
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
  /*
    Search Windows registry.
  */
  if (blob == (void *) NULL)
    blob=NTResourceToBlob(filename);
#endif
#endif
  if (blob == (void *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),ConfigureWarning,
      "UnableToOpenConfigureFile","`%s'",path);
  return(blob);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t C a c h e V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCacheView() gets pixels from the in-memory or disk pixel cache as
%  defined by the geometry parameters.   A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  Deprecated, replace with:
%
%    GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
%      GetCacheViewException(cache_view));
%
%  The format of the GetCacheView method is:
%
%      PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows)
%
%  A description of each parameter follows:
%
%    o cache_view: the address of a structure of type CacheView.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
*/
MagickExport PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  PixelPacket
    *pixels;

  /* Deprecated thin wrapper over GetCacheViewAuthenticPixels(). */
  pixels=GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view));
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t C a c h e V i e w I n d e x e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCacheViewIndexes() returns the indexes associated with the specified
%  view.
% % Deprecated, replace with: % % GetCacheViewAuthenticIndexQueue(cache_view); % % The format of the GetCacheViewIndexes method is: % % IndexPacket *GetCacheViewIndexes(CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % */ MagickExport IndexPacket *GetCacheViewIndexes(CacheView *cache_view) { return(GetCacheViewAuthenticIndexQueue(cache_view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t C a c h e V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as % defined by the geometry parameters. A pointer to the pixels is returned if % the pixels are transferred, otherwise a NULL is returned. % % Deprecated, replace with: % % GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows, % GetCacheViewException(cache_view)); % % The format of the GetCacheViewPixels method is: % % PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % */ MagickExport PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows) { PixelPacket *pixels; pixels=GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows, GetCacheViewException(cache_view)); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t E x c e p t i o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetExceptionInfo() initializes an exception to default values. 
%
%  The format of the GetExceptionInfo method is:
%
%      GetExceptionInfo(ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o exception: the exception info.
%
*/
MagickExport void GetExceptionInfo(ExceptionInfo *exception)
{
  /*
    Reset the caller-supplied exception to a pristine, usable state.
  */
  assert(exception != (ExceptionInfo *) NULL);
  (void) memset(exception,0,sizeof(*exception));
  /* severity is explicitly cleared even though memset already zeroed it */
  exception->severity=UndefinedException;
  /* a fresh linked list collects any nested exception records */
  exception->exceptions=(void *) NewLinkedList(0);
  /* the semaphore guards concurrent access to the exception list */
  exception->semaphore=AllocateSemaphoreInfo();
  /* the signature marks the structure as fully initialized */
  exception->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e A t t r i b u t e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageAttribute() searches the list of image attributes and returns
%  a pointer to the attribute if it exists otherwise NULL.
%
%  The format of the GetImageAttribute method is:
%
%      const ImageAttribute *GetImageAttribute(const Image *image,
%        const char *key)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o key:  These character strings are the name of an image attribute to
%  return.
% */ static void *DestroyAttribute(void *attribute) { register ImageAttribute *p; p=(ImageAttribute *) attribute; if (p->value != (char *) NULL) p->value=DestroyString(p->value); return(RelinquishMagickMemory(p)); } MagickExport const ImageAttribute *GetImageAttribute(const Image *image, const char *key) { const char *value; ImageAttribute *attribute; (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1"); value=GetImageProperty(image,key); if (value == (const char *) NULL) return((const ImageAttribute *) NULL); if (image->attributes == (void *) NULL) ((Image *) image)->attributes=NewSplayTree(CompareSplayTreeString, RelinquishMagickMemory,DestroyAttribute); else { const ImageAttribute *attribute; attribute=(const ImageAttribute *) GetValueFromSplayTree((SplayTreeInfo *) image->attributes,key); if (attribute != (const ImageAttribute *) NULL) return(attribute); } attribute=(ImageAttribute *) AcquireMagickMemory(sizeof(*attribute)); if (attribute == (ImageAttribute *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(attribute,0,sizeof(*attribute)); attribute->key=ConstantString(key); attribute->value=ConstantString(value); (void) AddValueToSplayTree((SplayTreeInfo *) ((Image *) image)->attributes, attribute->key,attribute); return((const ImageAttribute *) attribute); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C l i p p i n g P a t h A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageClippingPathAttribute() searches the list of image attributes and % returns a pointer to a clipping path if it exists otherwise NULL. 
% % Deprecated, replace with: % % GetImageAttribute(image,"8BIM:1999,2998"); % % The format of the GetImageClippingPathAttribute method is: % % const ImageAttribute *GetImageClippingPathAttribute(Image *image) % % A description of each parameter follows: % % o attribute: Method GetImageClippingPathAttribute returns the clipping % path if it exists otherwise NULL. % % o image: the image. % */ MagickExport const ImageAttribute *GetImageClippingPathAttribute(Image *image) { return(GetImageAttribute(image,"8BIM:1999,2998")); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e F r o m M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageFromMagickRegistry() gets an image from the registry as defined by % its name. If the image is not found, a NULL image is returned. % % Deprecated, replace with: % % GetImageRegistry(ImageRegistryType,name,exception); % % The format of the GetImageFromMagickRegistry method is: % % Image *GetImageFromMagickRegistry(const char *name,ssize_t *id, % ExceptionInfo *exception) % % A description of each parameter follows: % % o name: the name of the image to retrieve from the registry. % % o id: the registry id. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *GetImageFromMagickRegistry(const char *name,ssize_t *id, ExceptionInfo *exception) { *id=0L; return((Image *) GetImageRegistry(ImageRegistryType,name,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMagickRegistry() gets a blob from the registry as defined by the id. If % the blob that matches the id is not found, NULL is returned. 
%
%  The format of the GetMagickRegistry method is:
%
%      const void *GetMagickRegistry(const ssize_t id,RegistryType *type,
%        size_t *length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o id: the registry id.
%
%    o type: the registry type.
%
%    o length: the blob length in number of bytes.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetMagickRegistry(const ssize_t id,RegistryType *type,
  size_t *length,ExceptionInfo *exception)
{
  char
    key[MaxTextExtent];

  ssize_t
    i;

  void
    *blob;

  /* lookup order mirrors the legacy behavior: image, image-info, undefined */
  static const RegistryType
    lookup_order[3] =
      { ImageRegistryType, ImageInfoRegistryType, UndefinedRegistryType };

  *type=UndefinedRegistryType;
  *length=0;
  /* legacy numeric ids were stored under a stringified key */
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  blob=(void *) NULL;
  for (i=0; i < 3; i++)
  {
    blob=(void *) GetImageRegistry(lookup_order[i],key,exception);
    if (blob != (void *) NULL)
      break;
  }
  return(blob);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t M a g i c k T o k e n                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetMagickToken() gets a token from the token stream.  A token is defined as
%  a sequence of characters delimited by whitespace (e.g. clip-path), a
%  sequence delimited with quotes (.e.g "Quote me"), or a sequence enclosed in
%  parenthesis (e.g. rgb(0,0,0)).  GetMagickToken() also recognizes these
%  separator characters: ':', '=', ',', and ';'.
%
%  The format of the GetMagickToken method is:
%
%      void GetMagickToken(const char *start,const char **end,char *token)
%
%  A description of each parameter follows:
%
%    o start: the start of the token sequence.
%
%    o end: point to the end of the token sequence.
%
%    o token: copy the token to this buffer.
% */ MagickExport int GetImageGeometry(Image *image,const char *geometry, const unsigned int size_to_fit,RectangleInfo *region_info) { if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.4"); if (size_to_fit != MagickFalse) return((int) ParseRegionGeometry(image,geometry,region_info,&image->exception)); return((int) ParsePageGeometry(image,geometry,region_info,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageList() returns an image at the specified position in the list. % % Deprecated, replace with: % % CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue, % exception); % % The format of the GetImageList method is: % % Image *GetImageList(const Image *images,const ssize_t offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o offset: the position within the list. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *GetImageList(const Image *images,const ssize_t offset, ExceptionInfo *exception) { Image *image; if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); image=CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue, exception); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e L i s t I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageListIndex() returns the position in the list of the specified % image. 
% % Deprecated, replace with: % % GetImageIndexInList(images); % % The format of the GetImageListIndex method is: % % ssize_t GetImageListIndex(const Image *images) % % A description of each parameter follows: % % o images: the image list. % */ MagickExport ssize_t GetImageListIndex(const Image *images) { if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(GetImageIndexInList(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e L i s t S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageListSize() returns the number of images in the list. % % Deprecated, replace with: % % GetImageListLength(images); % % The format of the GetImageListSize method is: % % size_t GetImageListSize(const Image *images) % % A description of each parameter follows: % % o images: the image list. % */ MagickExport size_t GetImageListSize(const Image *images) { if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(GetImageListLength(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a PixelPacket array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory. Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in RAM, or in a memory-mapped file. 
The returned pointer % should *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % PixelPacket. If the image type is CMYK or if the storage class is % PseduoClass, call GetAuthenticIndexQueue() after invoking GetImagePixels() % to obtain the black color component or colormap indexes (of type IndexPacket) % corresponding to the region. Once the PixelPacket (and/or IndexPacket) % array has been updated, the changes must be saved back to the underlying % image using SyncAuthenticPixels() or they may be lost. % % Deprecated, replace with: % % GetAuthenticPixels(image,x,y,columns,rows,&image->exception); % % The format of the GetImagePixels() method is: % % PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y, % const size_t columns,const size_t rows) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % */ MagickExport PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows) { return(GetAuthenticPixels(image,x,y,columns,rows,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetIndexes() returns the black channel or the colormap indexes associated % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the black channel or colormap indexes are not available. % % Deprecated, replace with: % % GetAuthenticIndexQueue(image); % % The format of the GetIndexes() method is: % % IndexPacket *GetIndexes(const Image *image) % % A description of each parameter follows: % % o indexes: GetIndexes() returns the indexes associated with the last % call to QueueAuthenticPixels() or GetAuthenticPixels(). % % o image: the image. 
% */ MagickExport IndexPacket *GetIndexes(const Image *image) { return(GetAuthenticIndexQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t M a g i c k G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMagickGeometry() is similar to GetGeometry() except the returned % geometry is modified as determined by the meta characters: %, !, <, >, % and ~. % % Deprecated, replace with: % % ParseMetaGeometry(geometry,x,y,width,height); % % The format of the GetMagickGeometry method is: % % unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,ssize_t *y, % size_t *width,size_t *height) % % A description of each parameter follows: % % o geometry: Specifies a character string representing the geometry % specification. % % o x,y: A pointer to an integer. The x and y offset as determined by % the geometry specification is returned here. % % o width,height: A pointer to an unsigned integer. The width and height % as determined by the geometry specification is returned here. % */ MagickExport unsigned int GetMagickGeometry(const char *geometry,ssize_t *x, ssize_t *y,size_t *width,size_t *height) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3"); return(ParseMetaGeometry(geometry,x,y,width,height)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImage() returns the next image in a list. % % Deprecated, replace with: % % GetNextImageInList(images); % % The format of the GetNextImage method is: % % Image *GetNextImage(const Image *images) % % A description of each parameter follows: % % o images: the image list. 
% */ MagickExport Image *GetNextImage(const Image *images) { if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(GetNextImageInList(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageAttribute() gets the next image attribute. % % Deprecated, replace with: % % const char *property; % property=GetNextImageProperty(image); % if (property != (const char *) NULL) % GetImageAttribute(image,property); % % The format of the GetNextImageAttribute method is: % % const ImageAttribute *GetNextImageAttribute(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const ImageAttribute *GetNextImageAttribute(const Image *image) { const char *property; property=GetNextImageProperty(image); if (property == (const char *) NULL) return((const ImageAttribute *) NULL); return(GetImageAttribute(image,property)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N u m b e r S c e n e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNumberScenes() returns the number of images in the list. % % Deprecated, replace with: % % GetImageListLength(image); % % The format of the GetNumberScenes method is: % % unsigned int GetNumberScenes(const Image *images) % % A description of each parameter follows: % % o images: the image list. 
% */ MagickExport unsigned int GetNumberScenes(const Image *image) { if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return((unsigned int) GetImageListLength(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOnePixel() returns a single pixel at the specified (x,y) location. % The image background color is returned if an error occurs. % % Deprecated, replace with: % % GetOneAuthenticPixel(image,x,y,&pixel,&image->exception); % % The format of the GetOnePixel() method is: % % PixelPacket GetOnePixel(const Image image,const ssize_t x,const ssize_t y) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % */ MagickExport PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y) { PixelPacket pixel; (void) GetOneAuthenticPixel(image,x,y,&pixel,&image->exception); return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixels() returns the pixels associated with the last call to % QueueAuthenticPixels() or GetAuthenticPixels(). % % Deprecated, replace with: % % GetAuthenticPixelQueue(image); % % The format of the GetPixels() method is: % % PixelPacket *GetPixels(const Image image) % % A description of each parameter follows: % % o pixels: GetPixels() returns the pixels associated with the last call % to QueueAuthenticPixels() or GetAuthenticPixels(). % % o image: the image. 
% */ MagickExport PixelPacket *GetPixels(const Image *image) { return(GetAuthenticPixelQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t P r e v i o u s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPreviousImage() returns the previous image in a list. % % Deprecated, replace with: % % GetPreviousImageInList(images)); % % The format of the GetPreviousImage method is: % % Image *GetPreviousImage(const Image *images) % % A description of each parameter follows: % % o images: the image list. % */ MagickExport Image *GetPreviousImage(const Image *images) { if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(GetPreviousImageInList(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H S L T r a n s f o r m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HSLTransform() converts a (hue, saturation, lightness) to a (red, green, % blue) triple. % % The format of the HSLTransformImage method is: % % void HSLTransform(const double hue,const double saturation, % const double lightness,Quantum *red,Quantum *green,Quantum *blue) % % A description of each parameter follows: % % o hue, saturation, lightness: A double value representing a % component of the HSL color space. % % o red, green, blue: A pointer to a pixel component of type Quantum. 
% */ static inline MagickRealType HueToRGB(MagickRealType m1,MagickRealType m2, MagickRealType hue) { if (hue < 0.0) hue+=1.0; if (hue > 1.0) hue-=1.0; if ((6.0*hue) < 1.0) return(m1+6.0*(m2-m1)*hue); if ((2.0*hue) < 1.0) return(m2); if ((3.0*hue) < 2.0) return(m1+6.0*(m2-m1)*(2.0/3.0-hue)); return(m1); } MagickExport void HSLTransform(const double hue,const double saturation, const double lightness,Quantum *red,Quantum *green,Quantum *blue) { MagickRealType b, g, r, m1, m2; /* Convert HSL to RGB colorspace. */ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); if (lightness <= 0.5) m2=lightness*(saturation+1.0); else m2=lightness+saturation-lightness*saturation; m1=2.0*lightness-m2; r=HueToRGB(m1,m2,hue+1.0/3.0); g=HueToRGB(m1,m2,hue); b=HueToRGB(m1,m2,hue-1.0/3.0); *red=ClampToQuantum((MagickRealType) QuantumRange*r); *green=ClampToQuantum((MagickRealType) QuantumRange*g); *blue=ClampToQuantum((MagickRealType) QuantumRange*b); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i t y A f f i n e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentityAffine() initializes the affine transform to the identity matrix. % % The format of the IdentityAffine method is: % % IdentityAffine(AffineMatrix *affine) % % A description of each parameter follows: % % o affine: A pointer the affine transform of type AffineMatrix. 
% */ MagickExport void IdentityAffine(AffineMatrix *affine) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); assert(affine != (AffineMatrix *) NULL); (void) memset(affine,0,sizeof(AffineMatrix)); affine->sx=1.0; affine->sy=1.0; } #if defined(MAGICKCORE_WINDOWS_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I m a g e T o H B i t m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ImageToHBITMAP() creates a Windows HBITMAP from an image. % % The format of the ImageToHBITMAP method is: % % HBITMAP ImageToHBITMAP(Image *image,Exceptioninfo *exception) % % A description of each parameter follows: % % o image: the image to convert. % */ MagickExport void *ImageToHBITMAP(Image *image,ExceptionInfo *exception) { BITMAP bitmap; HANDLE bitmap_bitsH; HBITMAP bitmapH; register ssize_t x; register const PixelPacket *p; register RGBQUAD *q; RGBQUAD *bitmap_bits; size_t length; ssize_t y; (void) memset(&bitmap,0,sizeof(bitmap)); bitmap.bmType=0; bitmap.bmWidth=(LONG) image->columns; bitmap.bmHeight=(LONG) image->rows; bitmap.bmWidthBytes=4*bitmap.bmWidth; bitmap.bmPlanes=1; bitmap.bmBitsPixel=32; bitmap.bmBits=NULL; length=bitmap.bmWidthBytes*bitmap.bmHeight; bitmap_bitsH=(HANDLE) GlobalAlloc(GMEM_MOVEABLE | GMEM_DDESHARE,length); if (bitmap_bitsH == NULL) { char *message; message=GetExceptionMessage(errno); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",message); message=DestroyString(message); return(NULL); } bitmap_bits=(RGBQUAD *) GlobalLock((HGLOBAL) bitmap_bitsH); q=bitmap_bits; if (bitmap.bmBits == NULL) bitmap.bmBits=bitmap_bits; (void) SetImageColorspace(image,sRGBColorspace); for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) 
break; for (x=0; x < (ssize_t) image->columns; x++) { q->rgbRed=ScaleQuantumToChar(GetPixelRed(p)); q->rgbGreen=ScaleQuantumToChar(GetPixelGreen(p)); q->rgbBlue=ScaleQuantumToChar(GetPixelBlue(p)); q->rgbReserved=0; p++; q++; } } bitmap.bmBits=bitmap_bits; bitmapH=CreateBitmapIndirect(&bitmap); if (bitmapH == NULL) { char *message; message=GetExceptionMessage(errno); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",message); message=DestroyString(message); } GlobalUnlock((HGLOBAL) bitmap_bitsH); GlobalFree((HGLOBAL) bitmap_bitsH); return((void *) bitmapH); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n i t i a l i z e M a g i c k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeMagick() initializes the ImageMagick environment. % % Deprecated, replace with: % % MagickCoreGenesis(path,MagickFalse); % % The format of the InitializeMagick function is: % % InitializeMagick(const char *path) % % A description of each parameter follows: % % o path: the execution path of the current ImageMagick client. % */ MagickExport void InitializeMagick(const char *path) { MagickCoreGenesis(path,MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p o l a t e P i x e l C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpolatePixelColor() applies bi-linear or tri-linear interpolation % between a pixel and it's neighbors. % % The format of the InterpolatePixelColor method is: % % MagickPixelPacket InterpolatePixelColor(const Image *image, % CacheView *view_info,InterpolatePixelMethod method,const double x, % const double y,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o image_view: the image cache view. 
%
%    o type: the type of pixel color interpolation.
%
%    o x,y: A double representing the current (x,y) position of the pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  BicubicInterpolate() evaluates, per channel, the cubic polynomial through
  the four samples pixels[0..3] at fractional offset dx, writing the result
  into *pixel.  For each channel the coefficients are:
    p = (s3-s2)-(s0-s1), q = (s0-s1)-p, r = s2-s0, s = s1
  giving value = p*dx^3 + q*dx^2 + r*dx + s.  The index (black/colormap)
  channel is interpolated only for CMYK pixels.
*/
static void BicubicInterpolate(const MagickPixelPacket *pixels,const double dx,
  MagickPixelPacket *pixel)
{
  MagickRealType
    dx2,
    p,
    q,
    r,
    s;

  dx2=dx*dx;
  /* red channel */
  p=(pixels[3].red-pixels[2].red)-(pixels[0].red-pixels[1].red);
  q=(pixels[0].red-pixels[1].red)-p;
  r=pixels[2].red-pixels[0].red;
  s=pixels[1].red;
  pixel->red=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  /* green channel */
  p=(pixels[3].green-pixels[2].green)-(pixels[0].green-pixels[1].green);
  q=(pixels[0].green-pixels[1].green)-p;
  r=pixels[2].green-pixels[0].green;
  s=pixels[1].green;
  pixel->green=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  /* blue channel */
  p=(pixels[3].blue-pixels[2].blue)-(pixels[0].blue-pixels[1].blue);
  q=(pixels[0].blue-pixels[1].blue)-p;
  r=pixels[2].blue-pixels[0].blue;
  s=pixels[1].blue;
  pixel->blue=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  /* opacity channel */
  p=(pixels[3].opacity-pixels[2].opacity)-(pixels[0].opacity-pixels[1].opacity);
  q=(pixels[0].opacity-pixels[1].opacity)-p;
  r=pixels[2].opacity-pixels[0].opacity;
  s=pixels[1].opacity;
  pixel->opacity=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  if (pixel->colorspace == CMYKColorspace)
    {
      /* index carries the black channel for CMYK */
      p=(pixels[3].index-pixels[2].index)-(pixels[0].index-pixels[1].index);
      q=(pixels[0].index-pixels[1].index)-p;
      r=pixels[2].index-pixels[0].index;
      s=pixels[1].index;
      pixel->index=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
    }
}

/*
  CubicWeightingFunction() is the cubic B-spline basis: a weighted sum of
  clamped (max(x+k,0))^3 terms with weights 1,-4,6,-4, scaled by 1/6.
*/
static inline MagickRealType CubicWeightingFunction(const MagickRealType x)
{
  MagickRealType
    alpha,
    gamma;

  alpha=MagickMax(x+2.0,0.0);
  gamma=1.0*alpha*alpha*alpha;
  alpha=MagickMax(x+1.0,0.0);
  gamma-=4.0*alpha*alpha*alpha;
  alpha=MagickMax(x+0.0,0.0);
  gamma+=6.0*alpha*alpha*alpha;
  alpha=MagickMax(x-1.0,0.0);
  gamma-=4.0*alpha*alpha*alpha;
  return(gamma/6.0);
}

/*
  MeshInterpolate() blends sample p with the two axis samples x and y using
  barycentric weights delta->x, delta->y and (1-delta->x-delta->y).
*/
static inline double MeshInterpolate(const PointInfo *delta,const double p,
  const double x,const double y)
{
  return(delta->x*x+delta->y*y+(1.0-delta->x-delta->y)*p);
}

/*
  NearestNeighbor() rounds half away from zero to the nearest integer
  coordinate.
*/
static inline ssize_t NearestNeighbor(MagickRealType x)
{
if (x >= 0.0) return((ssize_t) (x+0.5)); return((ssize_t) (x-0.5)); } MagickExport MagickPixelPacket InterpolatePixelColor(const Image *image, CacheView *image_view,const InterpolatePixelMethod method,const double x, const double y,ExceptionInfo *exception) { MagickPixelPacket pixel; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t i; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image_view != (CacheView *) NULL); GetMagickPixelPacket(image,&pixel); switch (method) { case AverageInterpolatePixel: { double gamma; MagickPixelPacket pixels[16]; MagickRealType alpha[16]; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t) floor(y)-1,4,4,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (i=0; i < 16L; i++) { GetMagickPixelPacket(image,pixels+i); SetMagickPixelPacket(image,p,indexes+i,pixels+i); alpha[i]=1.0; if (image->matte != MagickFalse) { alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[i].red*=alpha[i]; pixels[i].green*=alpha[i]; pixels[i].blue*=alpha[i]; if (image->colorspace == CMYKColorspace) pixels[i].index*=alpha[i]; } gamma=alpha[i]; gamma=PerceptibleReciprocal(gamma); pixel.red+=gamma*0.0625*pixels[i].red; pixel.green+=gamma*0.0625*pixels[i].green; pixel.blue+=gamma*0.0625*pixels[i].blue; pixel.opacity+=0.0625*pixels[i].opacity; if (image->colorspace == CMYKColorspace) pixel.index+=gamma*0.0625*pixels[i].index; p++; } break; } case BicubicInterpolatePixel: { MagickPixelPacket pixels[16], u[4]; MagickRealType alpha[16]; PointInfo delta; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t) floor(y)-1,4,4,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (i=0; i < 4L; i++) GetMagickPixelPacket(image,u+i); for (i=0; i < 16L; i++) { GetMagickPixelPacket(image,pixels+i); 
SetMagickPixelPacket(image,p,indexes+i,pixels+i); alpha[i]=1.0; if (image->matte != MagickFalse) { alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[i].red*=alpha[i]; pixels[i].green*=alpha[i]; pixels[i].blue*=alpha[i]; if (image->colorspace == CMYKColorspace) pixels[i].index*=alpha[i]; } p++; } delta.x=x-floor(x); for (i=0; i < 4L; i++) { GetMagickPixelPacket(image,pixels+4*i); BicubicInterpolate(pixels+4*i,delta.x,u+i); } delta.y=y-floor(y); BicubicInterpolate(u,delta.y,&pixel); break; } case BilinearInterpolatePixel: default: { double gamma; MagickPixelPacket pixels[16]; MagickRealType alpha[16]; PointInfo delta; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t) floor(y),2,2,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (i=0; i < 4L; i++) { GetMagickPixelPacket(image,pixels+i); SetMagickPixelPacket(image,p,indexes+i,pixels+i); alpha[i]=1.0; if (image->matte != MagickFalse) { alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[i].red*=alpha[i]; pixels[i].green*=alpha[i]; pixels[i].blue*=alpha[i]; if (image->colorspace == CMYKColorspace) pixels[i].index*=alpha[i]; } p++; } delta.x=x-floor(x); delta.y=y-floor(y); gamma=(((1.0-delta.y)*((1.0-delta.x)*alpha[0]+delta.x*alpha[1])+delta.y* ((1.0-delta.x)*alpha[2]+delta.x*alpha[3]))); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].red+delta.x* pixels[1].red)+delta.y*((1.0-delta.x)*pixels[2].red+delta.x* pixels[3].red)); pixel.green=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].green+delta.x* pixels[1].green)+delta.y*((1.0-delta.x)*pixels[2].green+ delta.x*pixels[3].green)); pixel.blue=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].blue+delta.x* pixels[1].blue)+delta.y*((1.0-delta.x)*pixels[2].blue+delta.x* pixels[3].blue)); pixel.opacity=((1.0-delta.y)*((1.0-delta.x)*pixels[0].opacity+delta.x* pixels[1].opacity)+delta.y*((1.0-delta.x)*pixels[2].opacity+delta.x* 
pixels[3].opacity)); if (image->colorspace == CMYKColorspace) pixel.index=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].index+delta.x* pixels[1].index)+delta.y*((1.0-delta.x)*pixels[2].index+delta.x* pixels[3].index)); break; } case FilterInterpolatePixel: { Image *excerpt_image, *filter_image; MagickPixelPacket pixels[1]; RectangleInfo geometry; geometry.width=4L; geometry.height=4L; geometry.x=(ssize_t) floor(x)-1L; geometry.y=(ssize_t) floor(y)-1L; excerpt_image=ExcerptImage(image,&geometry,exception); if (excerpt_image == (Image *) NULL) break; filter_image=ResizeImage(excerpt_image,1,1,image->filter,image->blur, exception); excerpt_image=DestroyImage(excerpt_image); if (filter_image == (Image *) NULL) break; p=GetVirtualPixels(filter_image,0,0,1,1,exception); if (p == (const PixelPacket *) NULL) { filter_image=DestroyImage(filter_image); break; } indexes=GetVirtualIndexQueue(filter_image); GetMagickPixelPacket(image,pixels); SetMagickPixelPacket(image,p,indexes,&pixel); filter_image=DestroyImage(filter_image); break; } case IntegerInterpolatePixel: { MagickPixelPacket pixels[1]; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t) floor(y),1,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); GetMagickPixelPacket(image,pixels); SetMagickPixelPacket(image,p,indexes,&pixel); break; } case MeshInterpolatePixel: { double gamma; MagickPixelPacket pixels[4]; MagickRealType alpha[4]; PointInfo delta, luminance; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t) floor(y),2,2,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (i=0; i < 4L; i++) { GetMagickPixelPacket(image,pixels+i); SetMagickPixelPacket(image,p,indexes+i,pixels+i); alpha[i]=1.0; if (image->matte != MagickFalse) { alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[i].red*=alpha[i]; pixels[i].green*=alpha[i]; pixels[i].blue*=alpha[i]; if 
(image->colorspace == CMYKColorspace) pixels[i].index*=alpha[i]; } p++; } delta.x=x-floor(x); delta.y=y-floor(y); luminance.x=MagickPixelLuma(pixels+0)-MagickPixelLuma(pixels+3); luminance.y=MagickPixelLuma(pixels+1)-MagickPixelLuma(pixels+2); if (fabs(luminance.x) < fabs(luminance.y)) { /* Diagonal 0-3 NW-SE. */ if (delta.x <= delta.y) { /* Bottom-left triangle (pixel:2, diagonal: 0-3). */ delta.y=1.0-delta.y; gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*MeshInterpolate(&delta,pixels[2].red, pixels[3].red,pixels[0].red); pixel.green=gamma*MeshInterpolate(&delta,pixels[2].green, pixels[3].green,pixels[0].green); pixel.blue=gamma*MeshInterpolate(&delta,pixels[2].blue, pixels[3].blue,pixels[0].blue); pixel.opacity=gamma*MeshInterpolate(&delta,pixels[2].opacity, pixels[3].opacity,pixels[0].opacity); if (image->colorspace == CMYKColorspace) pixel.index=gamma*MeshInterpolate(&delta,pixels[2].index, pixels[3].index,pixels[0].index); } else { /* Top-right triangle (pixel:1, diagonal: 0-3). */ delta.x=1.0-delta.x; gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*MeshInterpolate(&delta,pixels[1].red, pixels[0].red,pixels[3].red); pixel.green=gamma*MeshInterpolate(&delta,pixels[1].green, pixels[0].green,pixels[3].green); pixel.blue=gamma*MeshInterpolate(&delta,pixels[1].blue, pixels[0].blue,pixels[3].blue); pixel.opacity=gamma*MeshInterpolate(&delta,pixels[1].opacity, pixels[0].opacity,pixels[3].opacity); if (image->colorspace == CMYKColorspace) pixel.index=gamma*MeshInterpolate(&delta,pixels[1].index, pixels[0].index,pixels[3].index); } } else { /* Diagonal 1-2 NE-SW. */ if (delta.x <= (1.0-delta.y)) { /* Top-left triangle (pixel 0, diagonal: 1-2). 
*/ gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*MeshInterpolate(&delta,pixels[0].red, pixels[1].red,pixels[2].red); pixel.green=gamma*MeshInterpolate(&delta,pixels[0].green, pixels[1].green,pixels[2].green); pixel.blue=gamma*MeshInterpolate(&delta,pixels[0].blue, pixels[1].blue,pixels[2].blue); pixel.opacity=gamma*MeshInterpolate(&delta,pixels[0].opacity, pixels[1].opacity,pixels[2].opacity); if (image->colorspace == CMYKColorspace) pixel.index=gamma*MeshInterpolate(&delta,pixels[0].index, pixels[1].index,pixels[2].index); } else { /* Bottom-right triangle (pixel: 3, diagonal: 1-2). */ delta.x=1.0-delta.x; delta.y=1.0-delta.y; gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*MeshInterpolate(&delta,pixels[3].red, pixels[2].red,pixels[1].red); pixel.green=gamma*MeshInterpolate(&delta,pixels[3].green, pixels[2].green,pixels[1].green); pixel.blue=gamma*MeshInterpolate(&delta,pixels[3].blue, pixels[2].blue,pixels[1].blue); pixel.opacity=gamma*MeshInterpolate(&delta,pixels[3].opacity, pixels[2].opacity,pixels[1].opacity); if (image->colorspace == CMYKColorspace) pixel.index=gamma*MeshInterpolate(&delta,pixels[3].index, pixels[2].index,pixels[1].index); } } break; } case NearestNeighborInterpolatePixel: { MagickPixelPacket pixels[1]; p=GetCacheViewVirtualPixels(image_view,NearestNeighbor(x), NearestNeighbor(y),1,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); GetMagickPixelPacket(image,pixels); SetMagickPixelPacket(image,p,indexes,&pixel); break; } case SplineInterpolatePixel: { double gamma; MagickPixelPacket pixels[16]; MagickRealType alpha[16], dx, dy; PointInfo delta; ssize_t j, n; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t) floor(y)-1,4,4,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); n=0; 
delta.x=x-floor(x); delta.y=y-floor(y); for (i=(-1); i < 3L; i++) { dy=CubicWeightingFunction((MagickRealType) i-delta.y); for (j=(-1); j < 3L; j++) { GetMagickPixelPacket(image,pixels+n); SetMagickPixelPacket(image,p,indexes+n,pixels+n); alpha[n]=1.0; if (image->matte != MagickFalse) { alpha[n]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[n].red*=alpha[n]; pixels[n].green*=alpha[n]; pixels[n].blue*=alpha[n]; if (image->colorspace == CMYKColorspace) pixels[n].index*=alpha[n]; } dx=CubicWeightingFunction(delta.x-(MagickRealType) j); gamma=alpha[n]; gamma=PerceptibleReciprocal(gamma); pixel.red+=gamma*dx*dy*pixels[n].red; pixel.green+=gamma*dx*dy*pixels[n].green; pixel.blue+=gamma*dx*dy*pixels[n].blue; if (image->matte != MagickFalse) pixel.opacity+=dx*dy*pixels[n].opacity; if (image->colorspace == CMYKColorspace) pixel.index+=gamma*dx*dy*pixels[n].index; n++; p++; } } break; } } return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p r e t I m a g e A t t r i b u t e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpretImageAttributes() replaces any embedded formatting characters with % the appropriate image attribute and returns the translated text. % % Deprecated, replace with: % % InterpretImageProperties(image_info,image,embed_text); % % The format of the InterpretImageAttributes method is: % % char *InterpretImageAttributes(const ImageInfo *image_info,Image *image, % const char *embed_text) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o embed_text: the address of a character string containing the embedded % formatting characters. 
% */ MagickExport char *InterpretImageAttributes(const ImageInfo *image_info, Image *image,const char *embed_text) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1"); return(InterpretImageProperties(image_info,image,embed_text)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e s R G B C o m p a n d o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InversesRGBCompandor() removes the gamma function from a sRGB pixel. % % The format of the InversesRGBCompandor method is: % % MagickRealType InversesRGBCompandor(const MagickRealType pixel) % % A description of each parameter follows: % % o pixel: the pixel. % */ MagickExport MagickRealType InversesRGBCompandor(const MagickRealType pixel) { if (pixel <= (0.0404482362771076*QuantumRange)) return(pixel/12.92); return(QuantumRange*pow((QuantumScale*pixel+0.055)/1.055,2.4)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M a g i c k I n s t a n t i a t e d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMagickInstantiated() returns MagickTrue if the ImageMagick environment % is currently instantiated: MagickCoreGenesis() has been called but % MagickDestroy() has not. % % The format of the IsMagickInstantiated method is: % % MagickBooleanType IsMagickInstantiated(void) % */ MagickExport MagickBooleanType IsMagickInstantiated(void) { return(IsMagickCoreInstantiated()); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I s S u b i m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsSubimage() returns MagickTrue if the geometry is a valid subimage % specification (e.g. [1], [1-9], [1,7,4]). 
% % The format of the IsSubimage method is: % % unsigned int IsSubimage(const char *geometry,const unsigned int pedantic) % % A description of each parameter follows: % % o geometry: This string is the geometry specification. % % o pedantic: A value other than 0 invokes a more restrictive set of % conditions for a valid specification (e.g. [1], [1-4], [4-1]). % */ MagickExport unsigned int IsSubimage(const char *geometry, const unsigned int pedantic) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (geometry == (const char *) NULL) return(MagickFalse); if ((strchr(geometry,'x') != (char *) NULL) || (strchr(geometry,'X') != (char *) NULL)) return(MagickFalse); if ((pedantic != MagickFalse) && (strchr(geometry,',') != (char *) NULL)) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImageColor() will map the given color to "black" and "white" % values, limearly spreading out the colors, and level values on a channel by % channel bases, as per LevelImage(). The given colors allows you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will modifyed in the % reverse direction. That is any existing "black" and "white" colors in the % image will become the color values given, with all other values compressed % appropriatally. This effectivally maps a greyscale gradient into the given % color gradient. 
% % Deprecated, replace with: % % LevelColorsImageChannel(image,channel,black_color,white_color,invert); % % The format of the LevelImageColors method is: % % MagickBooleanType LevelImageColors(Image *image,const ChannelType channel, % const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, % const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_color: The color to map black to/from % % o white_point: The color to map white to/from % % o invert: if true map the colors (levelize), rather than from (level) % */ MagickBooleanType LevelImageColors(Image *image,const ChannelType channel, const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, const MagickBooleanType invert) { return(LevelColorsImageChannel(image,channel,black_color,white_color,invert)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i b e r a t e M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiberateMemory() frees memory that has already been allocated, and NULL's % the pointer to it. % % The format of the LiberateMemory method is: % % void LiberateMemory(void **memory) % % A description of each parameter follows: % % o memory: A pointer to a block of memory to free for reuse. % */ MagickExport void LiberateMemory(void **memory) { assert(memory != (void **) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (*memory == (void *) NULL) return; free(*memory); *memory=(void *) NULL; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i b e r a t e S e m a p h o r e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiberateSemaphoreInfo() relinquishes a semaphore. 
% % Deprecated, replace with: % % UnlockSemaphoreInfo(*semaphore_info); % % The format of the LiberateSemaphoreInfo method is: % % LiberateSemaphoreInfo(void **semaphore_info) % % A description of each parameter follows: % % o semaphore_info: Specifies a pointer to an SemaphoreInfo structure. % */ MagickExport void LiberateSemaphoreInfo(SemaphoreInfo **semaphore_info) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); UnlockSemaphoreInfo(*semaphore_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k I n c a r n a t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickIncarnate() initializes the ImageMagick environment. % % Deprecated, replace with: % % MagickCoreGenesis(path,MagickFalse); % % The format of the MagickIncarnate function is: % % MagickIncarnate(const char *path) % % A description of each parameter follows: % % o path: the execution path of the current ImageMagick client. % */ MagickExport void MagickIncarnate(const char *path) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1"); MagickCoreGenesis(path,MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a g i c k M o n i t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickMonitor() calls the monitor handler method with a text string that % describes the task and a measure of completion. The method returns % MagickTrue on success otherwise MagickFalse if an error is encountered, e.g. % if there was a user interrupt. 
% % The format of the MagickMonitor method is: % % MagickBooleanType MagickMonitor(const char *text, % const MagickOffsetType offset,const MagickSizeType span, % void *client_data) % % A description of each parameter follows: % % o offset: the position relative to the span parameter which represents % how much progress has been made toward completing a task. % % o span: the span relative to completing a task. % % o client_data: the client data. % */ MagickExport MagickBooleanType MagickMonitor(const char *text, const MagickOffsetType offset,const MagickSizeType span, void *magick_unused(client_data)) { ExceptionInfo *exception; MagickBooleanType status; magick_unreferenced(client_data); assert(text != (const char *) NULL); (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",text); ProcessPendingEvents(text); status=MagickTrue; exception=AcquireExceptionInfo(); if (monitor_handler != (MonitorHandler) NULL) status=(*monitor_handler)(text,offset,span,exception); exception=DestroyExceptionInfo(exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MapImage() replaces the colors of an image with the closest color from a % reference image. % % Deprecated, replace with: % % QuantizeInfo quantize_info; % GetQuantizeInfo(&quantize_info); % quantize_info.dither=dither; % RemapImage(&quantize_info,image,map_image); % % The format of the MapImage method is: % % MagickBooleanType MapImage(Image *image,const Image *map_image, % const MagickBooleanType dither) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o map_image: the image. Reduce image to a set of colors represented by % this image. % % o dither: Set this integer value to something other than zero to % dither the mapped image. 
% */ MagickExport MagickBooleanType MapImage(Image *image,const Image *map_image, const MagickBooleanType dither) { QuantizeInfo quantize_info; /* Initialize color cube. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(map_image != (Image *) NULL); assert(map_image->signature == MagickCoreSignature); GetQuantizeInfo(&quantize_info); quantize_info.dither=dither; return(RemapImage(&quantize_info,image,map_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MapImages() replaces the colors of a sequence of images with the closest % color from a reference image. % % Deprecated, replace with: % % QuantizeInfo quantize_info; % GetQuantizeInfo(&quantize_info); % quantize_info.dither=dither; % RemapImages(&quantize_info,images,map_image); % % The format of the MapImage method is: % % MagickBooleanType MapImages(Image *images,Image *map_image, % const MagickBooleanType dither) % % A description of each parameter follows: % % o image: Specifies a pointer to a set of Image structures. % % o map_image: the image. Reduce image to a set of colors represented by % this image. % % o dither: Set this integer value to something other than zero to % dither the quantized image. 
% */ MagickExport MagickBooleanType MapImages(Image *images,const Image *map_image, const MagickBooleanType dither) { QuantizeInfo quantize_info; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); GetQuantizeInfo(&quantize_info); quantize_info.dither=dither; return(RemapImages(&quantize_info,images,map_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a t t e F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MatteFloodfill() changes the transparency value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod % is specified, the transparency value is changed for any neighbor pixel % that does not match the bordercolor member of image. % % By default target must match a particular pixel transparency exactly. % However, in many cases two transparency values may differ by a % small amount. The fuzz member of image defines how much tolerance is % acceptable to consider two transparency values as the same. For example, % set fuzz to 10 and the opacity values of 100 and 102 respectively are % now interpreted as the same value for the purposes of the floodfill. % % The format of the MatteFloodfillImage method is: % % MagickBooleanType MatteFloodfillImage(Image *image, % const PixelPacket target,const Quantum opacity,const ssize_t x_offset, % const ssize_t y_offset,const PaintMethod method) % % A description of each parameter follows: % % o image: the image. % % o target: the RGB value of the target color. % % o opacity: the level of transparency: 0 is fully opaque and QuantumRange is % fully transparent. % % o x,y: the starting location of the operation. % % o method: Choose either FloodfillMethod or FillToBorderMethod. 
% */ MagickExport MagickBooleanType MatteFloodfillImage(Image *image, const PixelPacket target,const Quantum opacity,const ssize_t x_offset, const ssize_t y_offset,const PaintMethod method) { Image *floodplane_image; MagickBooleanType skip; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel); /* Set floodfill color. */ segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize, sizeof(*segment_stack)); if (segment_stack == (SegmentInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Push initial segment on stack. */ x=x_offset; y=y_offset; start=0; s=segment_stack; PushSegmentStack(y,x,x,1); PushSegmentStack(y+1,x,x,-1); while (s > segment_stack) { register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. 
*/ p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception); q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1, &image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; p+=x1; q+=x1; for (x=x1; x >= 0; x--) { if (q->opacity == (Quantum) TransparentOpacity) break; if (method == FloodfillMethod) { if (IsColorSimilar(image,p,&target) == MagickFalse) break; } else if (IsColorSimilar(image,p,&target) != MagickFalse) break; q->opacity=(Quantum) TransparentOpacity; q--; p--; } if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse) break; skip=x >= x1 ? MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetVirtualPixels(image,x,y,image->columns-x,1, &image->exception); q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1, &image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for ( ; x < (ssize_t) image->columns; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; if (method == FloodfillMethod) { if (IsColorSimilar(image,p,&target) == MagickFalse) break; } else if (IsColorSimilar(image,p,&target) != MagickFalse) break; q->opacity=(Quantum) TransparentOpacity; q++; p++; } if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1, &image->exception); q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1, &image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for ( ; x <= x2; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; if (method == FloodfillMethod) { if (IsColorSimilar(image,p,&target) != MagickFalse) break; } else if 
(IsColorSimilar(image,p,&target) == MagickFalse) break; p++; q++; } } start=x; } while (x <= x2); } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; /* Tile fill color onto floodplane. */ p=GetVirtualPixels(floodplane_image,0,y,image->columns,1, &image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(p) != OpaqueOpacity) q->opacity=opacity; p++; q++; } if (SyncAuthenticPixels(image,&image->exception) == MagickFalse) break; } segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack); floodplane_image=DestroyImage(floodplane_image); return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a x i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaximumImages() returns the maximum intensity of an image sequence. % % Deprecated, replace with: % % EvaluateImages(images,MinEvaluateOperator,exception); % % The format of the MaxImages method is: % % Image *MaximumImages(Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MaximumImages(const Image *images,ExceptionInfo *exception) { return(EvaluateImages(images,MinEvaluateOperator,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinimumImages() returns the minimum intensity of an image sequence. 
% % Deprecated, replace with: % % EvaluateImages(images,MinEvaluateOperator,exception); % % The format of the MinimumImages method is: % % Image *MinimumImages(Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MinimumImages(const Image *images,ExceptionInfo *exception) { return(EvaluateImages(images,MinEvaluateOperator,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M e d i a n F i l t e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MedianFilterImage() applies a digital filter that improves the quality % of a noisy image. Each pixel is replaced by the median in a set of % neighboring pixels as defined by radius. % % The algorithm was contributed by Mike Edmonds and implements an insertion % sort for selecting median color-channel values. For more on this algorithm % see "Skip Lists: A probabilistic Alternative to Balanced Trees" by William % Pugh in the June 1990 of Communications of the ACM. % % The format of the MedianFilterImage method is: % % Image *MedianFilterImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *MedianFilterImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *median_image; median_image=StatisticImage(image,MedianStatistic,(size_t) radius,(size_t) radius,exception); return(median_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModeImage() makes each pixel the 'predominant color' of the neighborhood % of the specified radius. % % The format of the ModeImage method is: % % Image *ModeImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ModeImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *mode_image; mode_image=StatisticImage(image,ModeStatistic,(size_t) radius,(size_t) radius, exception); return(mode_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o s a i c I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MosaicImages() Obsolete Function: Use MergeImageLayers() instead. % % Deprecated, replace with: % % MergeImageLayers(image,MosaicLayer,exception); % % The format of the MosaicImage method is: % % Image *MosaicImages(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image list to be composited together % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *MosaicImages(Image *image,ExceptionInfo *exception)
{
  /* Deprecated wrapper: forward to MergeImageLayers() with MosaicLayer. */
  return(MergeImageLayers(image,MosaicLayer,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O p a q u e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpaqueImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the OpaqueImage method is:
%
%      MagickBooleanType OpaqueImage(Image *image,
%        const PixelPacket *target,const PixelPacket fill)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
*/
MagickExport MagickBooleanType OpaqueImage(Image *image,
  const PixelPacket target,const PixelPacket fill)
{
#define OpaqueImageTag  "Opaque/Image"

  MagickBooleanType
    proceed;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Make image color opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Make DirectClass image opaque: replace every matching pixel in
        every row with the fill color.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (IsColorSimilar(image,q,&target) != MagickFalse)
            *q=fill;
          q++;
        }
        if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
          break;
        proceed=SetImageProgress(image,OpaqueImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          break;
      }
      break;
    }
    case PseudoClass:
    {
      /*
        Make PseudoClass image opaque: substitute matching colormap
        entries; a transparent fill additionally requires a per-pixel
        opacity pass.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if (IsColorSimilar(image,&image->colormap[i],&target) != MagickFalse)
          image->colormap[i]=fill;
      }
      if (fill.opacity != OpaqueOpacity)
        {
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register ssize_t
              x;

            register PixelPacket
              *magick_restrict q;

            q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) != MagickFalse)
                q->opacity=fill.opacity;
              q++;
            }
            if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
              break;
          }
        }
      (void) SyncImage(image);
      break;
    }
  }
  /* A non-opaque fill activates the image's matte channel. */
  if (fill.opacity != OpaqueOpacity)
    image->matte=MagickTrue;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   O p e n C a c h e V i e w                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenCacheView() opens a view into the pixel cache, using the
%  VirtualPixelMethod that is defined within the given image itself.
%
%  Deprecated, replace with:
%
%    AcquireVirtualCacheView(image,&image->exception);
%
%  The format of the OpenCacheView method is:
%
%      CacheView *OpenCacheView(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport CacheView *OpenCacheView(const Image *image) { return(AcquireVirtualCacheView(image,&((Image *) image)->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p e n M a g i c k S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenMagickStream() opens the file at the specified path and return the % associated stream. % % The path of the OpenMagickStream method is: % % FILE *OpenMagickStream(const char *path,const char *mode) % % A description of each parameter follows. % % o path: the file path. % % o mode: the file mode. % */ #if defined(MAGICKCORE_HAVE__WFOPEN) static size_t UTF8ToUTF16(const unsigned char *utf8,wchar_t *utf16) { register const unsigned char *p; if (utf16 != (wchar_t *) NULL) { register wchar_t *q; wchar_t c; /* Convert UTF-8 to UTF-16. */ q=utf16; for (p=utf8; *p != '\0'; p++) { if ((*p & 0x80) == 0) *q=(*p); else if ((*p & 0xE0) == 0xC0) { c=(*p); *q=(c & 0x1F) << 6; p++; if ((*p & 0xC0) != 0x80) return(0); *q|=(*p & 0x3F); } else if ((*p & 0xF0) == 0xE0) { c=(*p); *q=c << 12; p++; if ((*p & 0xC0) != 0x80) return(0); c=(*p); *q|=(c & 0x3F) << 6; p++; if ((*p & 0xC0) != 0x80) return(0); *q|=(*p & 0x3F); } else return(0); q++; } *q++='\0'; return(q-utf16); } /* Compute UTF-16 string length. */ for (p=utf8; *p != '\0'; p++) { if ((*p & 0x80) == 0) ; else if ((*p & 0xE0) == 0xC0) { p++; if ((*p & 0xC0) != 0x80) return(0); } else if ((*p & 0xF0) == 0xE0) { p++; if ((*p & 0xC0) != 0x80) return(0); p++; if ((*p & 0xC0) != 0x80) return(0); } else return(0); } return(p-utf8); } static wchar_t *ConvertUTF8ToUTF16(const unsigned char *source) { size_t length; wchar_t *utf16; length=UTF8ToUTF16(source,(wchar_t *) NULL); if (length == 0) { register ssize_t i; /* Not UTF-8, just copy. 
*/ length=strlen((const char *) source); utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16)); if (utf16 == (wchar_t *) NULL) return((wchar_t *) NULL); for (i=0; i <= (ssize_t) length; i++) utf16[i]=source[i]; return(utf16); } utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16)); if (utf16 == (wchar_t *) NULL) return((wchar_t *) NULL); length=UTF8ToUTF16(source,utf16); return(utf16); } #endif MagickExport FILE *OpenMagickStream(const char *path,const char *mode) { FILE *file; if ((path == (const char *) NULL) || (mode == (const char *) NULL)) { errno=EINVAL; return((FILE *) NULL); } file=(FILE *) NULL; #if defined(MAGICKCORE_HAVE__WFOPEN) { wchar_t *unicode_mode, *unicode_path; unicode_path=ConvertUTF8ToUTF16((const unsigned char *) path); if (unicode_path == (wchar_t *) NULL) return((FILE *) NULL); unicode_mode=ConvertUTF8ToUTF16((const unsigned char *) mode); if (unicode_mode == (wchar_t *) NULL) { unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path); return((FILE *) NULL); } file=_wfopen(unicode_path,unicode_mode); unicode_mode=(wchar_t *) RelinquishMagickMemory(unicode_mode); unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path); } #endif if (file == (FILE *) NULL) file=fopen(path,mode); return(file); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P a i n t F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PaintFloodfill() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. 
For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % Deprecated, replace with: % % FloodfillPaintImage(image,channel,draw_info,target,x,y, % method == FloodfillMethod ? MagickFalse : MagickTrue); % % The format of the PaintFloodfillImage method is: % % MagickBooleanType PaintFloodfillImage(Image *image, % const ChannelType channel,const MagickPixelPacket target, % const ssize_t x,const ssize_t y,const DrawInfo *draw_info, % const PaintMethod method) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o target: the RGB value of the target color. % % o x,y: the starting location of the operation. % % o draw_info: the draw info. % % o method: Choose either FloodfillMethod or FillToBorderMethod. % */ MagickExport MagickBooleanType PaintFloodfillImage(Image *image, const ChannelType channel,const MagickPixelPacket *target,const ssize_t x, const ssize_t y,const DrawInfo *draw_info,const PaintMethod method) { MagickBooleanType status; status=FloodfillPaintImage(image,channel,draw_info,target,x,y, method == FloodfillMethod ? MagickFalse : MagickTrue); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % P a i n t O p a q u e I m a g e % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PaintOpaqueImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
% % Deprecated, replace with: % % OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse); % OpaquePaintImageChannel(image,channel,target,fill,MagickFalse); % % The format of the PaintOpaqueImage method is: % % MagickBooleanType PaintOpaqueImage(Image *image, % const PixelPacket *target,const PixelPacket *fill) % MagickBooleanType PaintOpaqueImageChannel(Image *image, % const ChannelType channel,const PixelPacket *target, % const PixelPacket *fill) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o target: the RGB value of the target color. % % o fill: the replacement color. % */ MagickExport MagickBooleanType PaintOpaqueImage(Image *image, const MagickPixelPacket *target,const MagickPixelPacket *fill) { MagickBooleanType status; status=OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse); return(status); } MagickExport MagickBooleanType PaintOpaqueImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *target, const MagickPixelPacket *fill) { return(OpaquePaintImageChannel(image,channel,target,fill,MagickFalse)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P a i n t T r a n s p a r e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PaintTransparentImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
% % Deprecated, replace with: % % TransparentPaintImage(image,target,opacity,MagickFalse); % % The format of the PaintTransparentImage method is: % % MagickBooleanType PaintTransparentImage(Image *image, % const MagickPixelPacket *target,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o target: the RGB value of the target color. % % o opacity: the replacement opacity value. % */ MagickExport MagickBooleanType PaintTransparentImage(Image *image, const MagickPixelPacket *target,const Quantum opacity) { return(TransparentPaintImage(image,target,opacity,MagickFalse)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P a r s e I m a g e G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ParseImageGeometry() is similar to GetGeometry() except the returned % geometry is modified as determined by the meta characters: %, !, <, % and >. % % Deprecated, replace with: % % ParseMetaGeometry(geometry,x,y,width,height); % % The format of the ParseImageGeometry method is: % % int ParseImageGeometry(char *geometry,ssize_t *x,ssize_t *y, % size_t *width,size_t *height) % % A description of each parameter follows: % % o flags: Method ParseImageGeometry returns a bitmask that indicates % which of the four values were located in the geometry string. % % o image_geometry: Specifies a character string representing the geometry % specification. % % o x,y: A pointer to an integer. The x and y offset as determined by % the geometry specification is returned here. % % o width,height: A pointer to an unsigned integer. The width and height % as determined by the geometry specification is returned here. 
% */ MagickExport int ParseImageGeometry(const char *geometry,ssize_t *x,ssize_t *y, size_t *width,size_t *height) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1"); return((int) ParseMetaGeometry(geometry,x,y,width,height)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P a r s e S i z e G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ParseSizeGeometry() returns a region as defined by the geometry string with % respect to the image dimensions and aspect ratio. % % Deprecated, replace with: % % ParseMetaGeometry(geometry,&region_info->x,&region_info->y, % &region_info->width,&region_info->height); % % The format of the ParseSizeGeometry method is: % % MagickStatusType ParseSizeGeometry(const Image *image, % const char *geometry,RectangeInfo *region_info) % % A description of each parameter follows: % % o geometry: The geometry (e.g. 100x100+10+10). % % o region_info: the region as defined by the geometry string. % */ MagickExport MagickStatusType ParseSizeGeometry(const Image *image, const char *geometry,RectangleInfo *region_info) { MagickStatusType flags; (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.4.7"); SetGeometry(image,region_info); flags=ParseMetaGeometry(geometry,&region_info->x,&region_info->y, &region_info->width,&region_info->height); return(flags); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o p I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PopImageList() removes the last image in the list. % % Deprecated, replace with: % % RemoveLastImageFromList(images); % % The format of the PopImageList method is: % % Image *PopImageList(Image **images) % % A description of each parameter follows: % % o images: the image list. 
% */ MagickExport Image *PopImageList(Image **images) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(RemoveLastImageFromList(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o p I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PopImagePixels() transfers one or more pixel components from the image pixel % cache to a user supplied buffer. The pixels are returned in network byte % order. MagickTrue is returned if the pixels are successfully transferred, % otherwise MagickFalse. % % The format of the PopImagePixels method is: % % size_t PopImagePixels(Image *,const QuantumType quantum, % unsigned char *destination) % % A description of each parameter follows: % % o image: the image. % % o quantum: Declare which pixel components to transfer (RGB, RGBA, etc). % % o destination: The components are transferred to this buffer. % */ MagickExport size_t PopImagePixels(Image *image,const QuantumType quantum, unsigned char *destination) { QuantumInfo *quantum_info; size_t length; quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image); if (quantum_info == (QuantumInfo *) NULL) return(0); length=ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info, quantum,destination,&image->exception); quantum_info=DestroyQuantumInfo(quantum_info); return(length); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t s c r i p t G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PostscriptGeometry() replaces any page mneumonic with the equivalent size in % picas. % % Deprecated, replace with: % % GetPageGeometry(page); % % The format of the PostscriptGeometry method is: % % char *PostscriptGeometry(const char *page) % % A description of each parameter follows. 
% % o page: Specifies a pointer to an array of characters. % The string is either a Postscript page name (e.g. A4) or a postscript % page geometry (e.g. 612x792+36+36). % */ MagickExport char *PostscriptGeometry(const char *page) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1"); return(GetPageGeometry(page)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P u s h I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PushImageList() adds an image to the end of the list. % % Deprecated, replace with: % % AppendImageToList(images,CloneImageList(image,exception)); % % The format of the PushImageList method is: % % unsigned int PushImageList(Image *images,const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport unsigned int PushImageList(Image **images,const Image *image, ExceptionInfo *exception) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); AppendImageToList(images,CloneImageList(image,exception)); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P u s h I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PushImagePixels() transfers one or more pixel components from a user % supplied buffer into the image pixel cache of an image. The pixels are % expected in network byte order. It returns MagickTrue if the pixels are % successfully transferred, otherwise MagickFalse. % % The format of the PushImagePixels method is: % % size_t PushImagePixels(Image *image,const QuantumType quantum, % const unsigned char *source) % % A description of each parameter follows: % % o image: the image. 
% % o quantum: Declare which pixel components to transfer (red, green, blue, % opacity, RGB, or RGBA). % % o source: The pixel components are transferred from this buffer. % */ MagickExport size_t PushImagePixels(Image *image,const QuantumType quantum, const unsigned char *source) { QuantumInfo *quantum_info; size_t length; quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image); if (quantum_info == (QuantumInfo *) NULL) return(0); length=ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,quantum, source,&image->exception); quantum_info=DestroyQuantumInfo(quantum_info); return(length); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z a t i o n E r r o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizationError() measures the difference between the original and % quantized images. This difference is the total quantization error. The % error is computed by summing over all pixels in an image the distance % squared in RGB space between each reference pixel value and its quantized % value. These values are computed: % % o mean_error_per_pixel: This value is the mean error for any single % pixel in the image. % % o normalized_mean_square_error: This value is the normalized mean % quantization error for any single pixel in the image. This distance % measure is normalized to a range between 0 and 1. It is independent % of the range of red, green, and blue values in the image. % % o normalized_maximum_square_error: Thsi value is the normalized % maximum quantization error for any single pixel in the image. This % distance measure is normalized to a range between 0 and 1. It is % independent of the range of red, green, and blue values in your image. 
% % Deprecated, replace with: % % GetImageQuantizeError(image); % % The format of the QuantizationError method is: % % unsigned int QuantizationError(Image *image) % % A description of each parameter follows. % % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % */ MagickExport unsigned int QuantizationError(Image *image) { if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3"); return(GetImageQuantizeError(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a d i a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RadialBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. % % The format of the RadialBlurImage method is: % % Image *RadialBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % Image *RadialBlurImageChannel(const Image *image,const ChannelType channel, % const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o angle: the angle of the radial blur. % % o exception: return any errors or warnings in this structure. 
*/
/* Deprecated alias: forwards to RotationalBlurImage(). */
MagickExport Image *RadialBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  return(RotationalBlurImage(image,angle,exception));
}

/* Deprecated alias: forwards to RotationalBlurImageChannel(). */
MagickExport Image *RadialBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  return(RotationalBlurImageChannel(image,channel,angle,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomChannelThresholdImage() changes the value of individual pixels
%  based on the intensity of each pixel compared to a random threshold,
%  yielding a low-contrast two-color image.
%
%    o image: the image.
%    o channel: the channel or channels to be thresholded ("all",
%      "intensity", "opacity", or "matte").
%    o thresholds: a geometry string containing LOWxHIGH thresholds; "2x2",
%      "3x3" or "4x4" selects an ordered dither of that order instead.
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int RandomChannelThresholdImage(Image *image,
  const char *channel,const char *thresholds,ExceptionInfo *exception)
{
#define RandomChannelThresholdImageText  " RandomChannelThreshold image... "

  double
    lower_threshold,
    upper_threshold;

  RandomInfo
    *random_info;

  ssize_t
    count,
    y;

  /* Ordered-dither matrices for orders 2, 3 and 4 (values scale
     QuantumRange); `threshold` carries state across pixels. */
  static MagickRealType
    o2[4]={0.2f, 0.6f, 0.8f, 0.4f},
    o3[9]={0.1f, 0.6f, 0.3f, 0.7f, 0.5f, 0.8f, 0.4f, 0.9f, 0.2f},
    o4[16]={0.1f, 0.7f, 1.1f, 0.3f, 1.0f, 0.5f, 1.5f, 0.8f, 1.4f, 1.6f,
      0.6f, 1.2f, 0.4f, 0.9f, 1.3f, 0.2f},
    threshold=128;

  size_t
    order;

  /*
    Validate arguments and parse the thresholds string.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  lower_threshold=0;
  upper_threshold=0;
  if (LocaleCompare(thresholds,"2x2") == 0)
    order=2;
  else if (LocaleCompare(thresholds,"3x3") == 0)
    order=3;
  else if (LocaleCompare(thresholds,"4x4") == 0)
    order=4;
  else
    {
      /* order 1: random threshold between LOW and HIGH; '%' scales the
         bounds to QuantumRange; with a single value HIGH defaults to
         QuantumRange-LOW. */
      order=1;
      count=(ssize_t) sscanf(thresholds,"%lf[/x%%]%lf",&lower_threshold,
        &upper_threshold);
      if (strchr(thresholds,'%') != (char *) NULL)
        {
          upper_threshold*=(.01*QuantumRange);
          lower_threshold*=(.01*QuantumRange);
        }
      if (count == 1)
        upper_threshold=(MagickRealType) QuantumRange-lower_threshold;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " RandomChannelThresholdImage: channel type=%s",channel);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " Thresholds: %s (%fx%f)",thresholds,lower_threshold,upper_threshold);
  /* Intensity thresholding produces a two-entry colormap. */
  if (LocaleCompare(channel,"all") == 0 ||
      LocaleCompare(channel,"intensity") == 0)
    if (AcquireImageColormap(image,2) == MagickFalse)
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
  random_info=AcquireRandomInfo();
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register IndexPacket
      index,
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    if (LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"intensity") == 0)
      {
        /*
          Threshold pixel intensity against a per-pixel threshold (random
          for order 1, ordered-dither otherwise) into colormap index 0/1.
        */
        indexes=GetAuthenticIndexQueue(image);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            intensity;

          intensity=GetPixelIntensity(image,q);
          if (order == 1)
            {
              if (intensity < lower_threshold)
                threshold=lower_threshold;
              else if (intensity > upper_threshold)
                threshold=upper_threshold;
              else
                threshold=(MagickRealType) (QuantumRange*
                  GetPseudoRandomValue(random_info));
            }
          else if (order == 2)
            threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
          else if (order == 3)
            threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
          else if (order == 4)
            threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)];
          index=(IndexPacket) (intensity <= threshold ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
      }
    if (LocaleCompare(channel,"opacity") == 0 ||
        LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"matte") == 0)
      {
        /*
          Threshold the opacity channel to fully transparent/opaque.
          NOTE(review): for channel "all" `q` has already been advanced
          past the row by the intensity pass above, so this loop reads
          beyond the row it fetched — longstanding behavior of this
          deprecated routine, preserved as-is.
        */
        if (image->matte != MagickFalse)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            if (order == 1)
              {
                if ((MagickRealType) q->opacity < lower_threshold)
                  threshold=lower_threshold;
                else if ((MagickRealType) q->opacity > upper_threshold)
                  threshold=upper_threshold;
                else
                  threshold=(MagickRealType) (QuantumRange*
                    GetPseudoRandomValue(random_info));
              }
            else if (order == 2)
              threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
            else if (order == 3)
              threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
            else if (order == 4)
              threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)]/1.7;
            SetPixelOpacity(q,(MagickRealType) q->opacity <= threshold ?
              0 : QuantumRange);
            q++;
          }
      }
    else
      {
        /*
          To Do: red, green, blue, cyan, magenta, yellow, black.
        */
        if (LocaleCompare(channel,"intensity") != 0)
          ThrowBinaryException(OptionError,"UnrecognizedChannelType",
            image->filename);
      }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  random_info=DestroyRandomInfo(random_info);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReacquireMemory() changes the size of the memory and returns a pointer to
%  the (possibly moved) block.  The contents are unchanged up to the lesser
%  of the new and old sizes.
%
%    o memory: a pointer to a memory allocation; on return the pointer may
%      change.  On reallocation failure the original block is relinquished
%      and *memory is set to NULL.
%    o size: the new size of the allocated memory.
%
*/
MagickExport void ReacquireMemory(void **memory,const size_t size)
{
  void
    *allocation;

  assert(memory != (void **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*memory == (void *) NULL)
    {
      *memory=AcquireMagickMemory(size);
      return;
    }
  allocation=realloc(*memory,size);
  if (allocation == (void *) NULL)
    *memory=RelinquishMagickMemory(*memory);
  /* On failure allocation is NULL and *memory ends up NULL (old block
     already freed above). */
  *memory=allocation;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RecolorImage() applies a color transformation matrix to an image,
%  permitting saturation changes, hue rotation, luminance to alpha, and
%  various other effects.
Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). % % The format of the RecolorImage method is: % % Image *RecolorImage(const Image *image,const size_t order, % const double *color_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o order: the number of columns and rows in the recolor matrix. % % o color_matrix: An array of double representing the recolor matrix. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RecolorImage(const Image *image,const size_t order, const double *color_matrix,ExceptionInfo *exception) { KernelInfo *kernel_info; Image *recolor_image; kernel_info=AcquireKernelInfo("1"); if (kernel_info == (KernelInfo *) NULL) return((Image *) NULL); kernel_info->width=order; kernel_info->height=order; kernel_info->values=(double *) color_matrix; recolor_image=ColorMatrixImage(image,kernel_info,exception); kernel_info->values=(double *) NULL; kernel_info=DestroyKernelInfo(kernel_info); return(recolor_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e d u c e N o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceNoiseImage() smooths the contours of an image while still preserving % edge information. The algorithm works by replacing each pixel with its % neighbor closest in value. A neighbor is defined by radius. Use a radius % of 0 and ReduceNoise() selects a suitable radius for you. 
% % The format of the ReduceNoiseImage method is: % % Image *ReduceNoiseImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ReduceNoiseImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *reduce_image; reduce_image=StatisticImage(image,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); return(reduce_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e l i n g u i s h S e m a p h o r e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RelinquishSemaphoreInfo() relinquishes a semaphore. % % The format of the RelinquishSemaphoreInfo method is: % % RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info) % % A description of each parameter follows: % % o semaphore_info: Specifies a pointer to an SemaphoreInfo structure. % */ MagickExport void RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info) { assert(semaphore_info != (SemaphoreInfo *) NULL); UnlockSemaphoreInfo(semaphore_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e A t t r i b u t e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageAttributeIterator() resets the image attributes iterator. Use it % in conjunction with GetNextImageAttribute() to iterate over all the values % associated with an image. % % Deprecated, replace with: % % ResetImagePropertyIterator(image); % % The format of the ResetImageAttributeIterator method is: % % ResetImageAttributeIterator(const ImageInfo *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport void ResetImageAttributeIterator(const Image *image) { ResetImagePropertyIterator(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t C a c h e V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as % defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % Deprecated, replace with: % % QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows, % GetCacheViewException(cache_view)); % % The format of the SetCacheViewPixels method is: % % PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % */ MagickExport PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows) { PixelPacket *pixels; pixels=QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows, GetCacheViewException(cache_view)); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t C a c h e T h e s h o l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetCacheThreshold() sets the amount of free memory allocated for the pixel % cache. Once this threshold is exceeded, all subsequent pixels cache % operations are to/from disk. % % The format of the SetCacheThreshold() method is: % % void SetCacheThreshold(const size_t threshold) % % A description of each parameter follows: % % o threshold: the number of megabytes of memory available to the pixel % cache. 
%
*/
MagickExport void SetCacheThreshold(const size_t threshold)
{
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  /*
    Widen to MagickSizeType before scaling megabytes to bytes so the
    multiplication cannot overflow a 32-bit size_t for large thresholds;
    SetMagickResourceLimit() takes a MagickSizeType limit.
  */
  (void) SetMagickResourceLimit(MemoryResource,(MagickSizeType) threshold*
    1024*1024);
  (void) SetMagickResourceLimit(MapResource,2*(MagickSizeType) threshold*
    1024*1024);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t E x c e p t i o n I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetExceptionInfo() sets the exception severity.
%
%  The format of the SetExceptionInfo method is:
%
%      MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
%        ExceptionType severity)
%
%  A description of each parameter follows:
%
%    o exception: the exception info.
%
%    o severity: the exception severity.
%
*/
MagickExport MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
  ExceptionType severity)
{
  /*
    Discard any queued exceptions, then record the requested severity.
  */
  assert(exception != (ExceptionInfo *) NULL);
  ClearMagickException(exception);
  exception->severity=severity;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImage() sets the red, green, and blue components of each pixel to
%  the image background color and the opacity component to the specified
%  level of transparency.  The background color is defined by the
%  background_color member of the image.
%
%  The format of the SetImage method is:
%
%      void SetImage(Image *image,const Quantum opacity)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: Set each pixel to this level of transparency.
% */ MagickExport void SetImage(Image *image,const Quantum opacity) { PixelPacket background_color; ssize_t y; (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.0"); assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); background_color=image->background_color; if (opacity != OpaqueOpacity) background_color.opacity=opacity; if (background_color.opacity != OpaqueOpacity) { (void) SetImageStorageClass(image,DirectClass); image->matte=MagickTrue; } if ((image->storage_class == PseudoClass) || (image->colorspace == CMYKColorspace)) { /* Set colormapped or CMYK image. */ for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRGBO(q,&background_color); q++; } indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,0); if (SyncAuthenticPixels(image,&image->exception) == MagickFalse) break; } return; } /* Set DirectClass image. */ for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRGBO(q,&background_color); q++; } if (SyncAuthenticPixels(image,&image->exception) == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAttribute() searches the list of image attributes and replaces the % attribute value. 
If it is not found in the list, the attribute name % and value is added to the list. % % Deprecated, replace with: % % SetImageProperty(image,key,value); % % The format of the SetImageAttribute method is: % % MagickBooleanType SetImageAttribute(Image *image,const char *key, % const char *value) % % A description of each parameter follows: % % o image: the image. % % o key: the key. % % o value: the value. % */ MagickExport MagickBooleanType SetImageAttribute(Image *image,const char *key, const char *value) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1"); return(SetImageProperty(image,key,value)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageList() inserts an image into the list at the specified position. % % The format of the SetImageList method is: % % unsigned int SetImageList(Image *images,const Image *image, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o image: the image. % % o offset: the position within the list. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport unsigned int SetImageList(Image **images,const Image *image,
  const ssize_t offset,ExceptionInfo *exception)
{
  Image
    *clone;

  register ssize_t
    i;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(image,exception);
  if (clone == (Image *) NULL)
    return(MagickFalse);  /* clone failed; details already in exception */
  while (GetPreviousImageInList(*images) != (Image *) NULL)
    (*images)=GetPreviousImageInList(*images);
  for (i=0; i < offset; i++)
  {
    if (GetNextImageInList(*images) == (Image *) NULL)
      {
        /*
          Offset walks past the end of the list: release the clone instead
          of leaking it (the original returned without freeing it).
        */
        clone=DestroyImageList(clone);
        return(MagickFalse);
      }
    (*images)=GetNextImageInList(*images);
  }
  InsertImageInList(images,clone);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P i x e l s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImagePixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a PixelPacket array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful while the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  SetImagePixels() any way it pleases.  SetImagePixels() does not initialize
%  the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in RAM, or in a
%  memory-mapped file.  The returned pointer should *never* be deallocated
%  by the user.
% % Pixels accessed via the returned pointer represent a simple array of type % PixelPacket. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain % the black color component or the colormap indexes (of type IndexPacket) % corresponding to the region. Once the PixelPacket (and/or IndexPacket) % array has been updated, the changes must be saved back to the underlying % image using SyncAuthenticPixels() or they may be lost. % % Deprecated, replace with: % % QueueAuthenticPixels(image,x,y,columns,rows,&image->exception); % % The format of the SetImagePixels() method is: % % PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y, % const size_t columns,const size_t rows) % % A description of each parameter follows: % % o pixels: SetImagePixels returns a pointer to the pixels if they are % transferred, otherwise a NULL is returned. % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % */ MagickExport PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows) { return(QueueAuthenticPixels(image,x,y,columns,rows,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetMagickRegistry() sets a blob into the registry and returns a unique ID. % If an error occurs, -1 is returned. % % The format of the SetMagickRegistry method is: % % ssize_t SetMagickRegistry(const RegistryType type,const void *blob, % const size_t length,ExceptionInfo *exception) % % A description of each parameter follows: % % o type: the registry type. % % o blob: the address of a Binary Large OBject. 
% % o length: For a registry type of ImageRegistryType use sizeof(Image) % otherise the blob length in number of bytes. % % o exception: return any errors or warnings in this structure. % */ MagickExport ssize_t SetMagickRegistry(const RegistryType type,const void *blob, const size_t magick_unused(length),ExceptionInfo *exception) { char key[MaxTextExtent]; MagickBooleanType status; static ssize_t id = 0; magick_unreferenced(length); (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id); status=SetImageRegistry(type,key,blob,exception); if (status == MagickFalse) return(-1); return(id++); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t M o n i t o r H a n d l e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetMonitorHandler() sets the monitor handler to the specified method % and returns the previous monitor handler. % % The format of the SetMonitorHandler method is: % % MonitorHandler SetMonitorHandler(MonitorHandler handler) % % A description of each parameter follows: % % o handler: Specifies a pointer to a method to handle monitors. % */ MagickExport MonitorHandler GetMonitorHandler(void) { return(monitor_handler); } MagickExport MonitorHandler SetMonitorHandler(MonitorHandler handler) { MonitorHandler previous_handler; previous_handler=monitor_handler; monitor_handler=handler; return(previous_handler); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h i f t I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShiftImageList() removes an image from the beginning of the list. % % Deprecated, replace with: % % RemoveFirstImageFromList(images); % % The format of the ShiftImageList method is: % % Image *ShiftImageList(Image **images) % % A description of each parameter follows: % % o images: the image list. 
%
*/
MagickExport Image *ShiftImageList(Image **images)
{
  /* Deprecated thin wrapper: pop the first image off the list. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  return(RemoveFirstImageFromList(images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S i z e B l o b                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SizeBlob() returns the current length of the image file or blob.
%
%  Deprecated, replace with:
%
%    GetBlobSize(image);
%
%  The format of the SizeBlob method is:
%
%      off_t SizeBlob(Image *image)
%
%  A description of each parameter follows:
%
%    o size: Method SizeBlob returns the current length of the image file
%      or blob.
%
%    o image: the image.
%
*/
MagickExport MagickOffsetType SizeBlob(Image *image)
{
  /*
    Deprecated thin wrapper around GetBlobSize(); note the deprecation
    event is only logged when image->debug is enabled.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
  return((MagickOffsetType) GetBlobSize(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p l i c e I m a g e L i s t                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImageList() removes the images designated by offset and length from
%  the list and replaces them with the specified list.
%
%  The format of the SpliceImageList method is:
%
%      Image *SpliceImageList(Image *images,const ssize_t offset,
%        const size_t length,const Image *splices,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
%    o offset: the position within the list.
%
%    o length: the length of the image list to remove.
%
%    o splice: Replace the removed image list with this list.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *SpliceImageList(Image *images,const ssize_t offset, const size_t length,const Image *splices,ExceptionInfo *exception) { Image *clone; register ssize_t i; if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); clone=CloneImageList(splices,exception); while (GetPreviousImageInList(images) != (Image *) NULL) images=GetPreviousImageInList(images); for (i=0; i < offset; i++) { if (GetNextImageInList(images) == (Image *) NULL) return((Image *) NULL); images=GetNextImageInList(images); } (void) SpliceImageIntoList(&images,length,clone); return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % s R G B C o m p a n d o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % sRGBCompandor() adds the gamma function to a sRGB pixel. % % The format of the sRGBCompandor method is: % % MagickRealType sRGBCompandor(const MagickRealType pixel) % % A description of each parameter follows: % % o pixel: the pixel. % */ MagickExport MagickRealType sRGBCompandor(const MagickRealType pixel) { if (pixel <= (0.0031306684425005883*QuantumRange)) return(12.92*pixel); return(QuantumRange*(1.055*pow(QuantumScale*pixel,1.0/2.4)-0.055)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t r i p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Strip() strips any whitespace or quotes from the beginning and end of a % string of characters. % % The format of the Strip method is: % % void Strip(char *message) % % A description of each parameter follows: % % o message: Specifies an array of characters. 
% */ MagickExport void Strip(char *message) { register char *p, *q; assert(message != (char *) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (*message == '\0') return; if (strlen(message) == 1) return; p=message; while (isspace((int) ((unsigned char) *p)) != 0) p++; if ((*p == '\'') || (*p == '"')) p++; q=message+strlen(message)-1; while ((isspace((int) ((unsigned char) *q)) != 0) && (q > p)) q--; if (q > p) if ((*q == '\'') || (*q == '"')) q--; (void) memcpy(message,p,(size_t) (q-p+1)); message[q-p+1]='\0'; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c C a c h e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncCacheView() saves the cache view pixels to the in-memory or disk % cache. It returns MagickTrue if the pixel region is synced, otherwise % MagickFalse. % % Deprecated, replace with: % % SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view)); % % The format of the SyncCacheView method is: % % MagickBooleanType SyncCacheView(CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % */ MagickExport MagickBooleanType SyncCacheView(CacheView *cache_view) { MagickBooleanType status; status=SyncCacheViewAuthenticPixels(cache_view, GetCacheViewException(cache_view)); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c C a c h e V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncCacheViewPixels() saves the cache view pixels to the in-memory % or disk cache. It returns MagickTrue if the pixel region is flushed, % otherwise MagickFalse. 
% % Deprecated, replace with: % % SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view)); % % The format of the SyncCacheViewPixels method is: % % MagickBooleanType SyncCacheViewPixels(CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncCacheViewPixels(CacheView *cache_view) { MagickBooleanType status; status=SyncCacheViewAuthenticPixels(cache_view, GetCacheViewException(cache_view)); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is synced, otherwise % MagickFalse. % % Deprecated, replace with: % % SyncAuthenticPixels(image,&image->exception); % % The format of the SyncImagePixels() method is: % % MagickBooleanType SyncImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType SyncImagePixels(Image *image) { return(SyncAuthenticPixels(image,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y s t e m C o m m a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SystemCommand() executes the specified command and waits until it % terminates. The returned value is the exit status of the command. 
% % The format of the SystemCommand method is: % % int SystemCommand(const MagickBooleanType asynchronous, % const MagickBooleanType verbose,const char *command, % ExceptionInfo *exception) % % A description of each parameter follows: % % o asynchronous: a value other than 0 executes the parent program % concurrently with the new child process. % % o verbose: a value other than 0 prints the executed command before it is % invoked. % % o command: this string is the command to execute. % % o exception: return any errors here. % */ MagickExport int SystemCommand(const MagickBooleanType asynchronous, const MagickBooleanType verbose,const char *command,ExceptionInfo *exception) { int status; status=ExternalDelegateCommand(asynchronous,verbose,command,(char *) NULL, exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e m p o r a r y F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TemporaryFilename() replaces the contents of path by a unique path name. % % The format of the TemporaryFilename method is: % % void TemporaryFilename(char *path) % % A description of each parameter follows. % % o path: Specifies a pointer to an array of characters. The unique path % name is returned in this array. % */ MagickExport void TemporaryFilename(char *path) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6"); (void) AcquireUniqueFilename(path); (void) RelinquishUniqueFileResource(path); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThresholdImage() changes the value of individual pixels based on % the intensity of each pixel compared to threshold. The result is a % high-contrast, two color image. 
% % The format of the ThresholdImage method is: % % unsigned int ThresholdImage(Image *image,const double threshold) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value % */ MagickExport unsigned int ThresholdImage(Image *image,const double threshold) { #define ThresholdImageTag "Threshold/Image" IndexPacket index; ssize_t y; /* Threshold image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (!AcquireImageColormap(image,2)) ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", "UnableToThresholdImage"); for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { index=(IndexPacket) (GetPixelIntensity(image,q) <= threshold ? 0 : 1); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); q++; } if (!SyncAuthenticPixels(image,&image->exception)) break; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h r e s h o l d I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThresholdImageChannel() changes the value of individual pixels based on % the intensity of each pixel channel. The result is a high-contrast image. 
% % The format of the ThresholdImageChannel method is: % % unsigned int ThresholdImageChannel(Image *image,const char *threshold) % % A description of each parameter follows: % % o image: the image. % % o threshold: define the threshold values. % */ MagickExport unsigned int ThresholdImageChannel(Image *image, const char *threshold) { #define ThresholdImageTag "Threshold/Image" MagickPixelPacket pixel; GeometryInfo geometry_info; IndexPacket index; ssize_t y; unsigned int flags; /* Threshold image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (threshold == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&pixel); flags=ParseGeometry(threshold,&geometry_info); pixel.red=geometry_info.rho; if (flags & SigmaValue) pixel.green=geometry_info.sigma; else pixel.green=pixel.red; if (flags & XiValue) pixel.blue=geometry_info.xi; else pixel.blue=pixel.red; if (flags & PsiValue) pixel.opacity=geometry_info.psi; else pixel.opacity=(MagickRealType) OpaqueOpacity; if (flags & PercentValue) { pixel.red*=QuantumRange/100.0f; pixel.green*=QuantumRange/100.0f; pixel.blue*=QuantumRange/100.0f; pixel.opacity*=QuantumRange/100.0f; } if (!(flags & SigmaValue)) { if (!AcquireImageColormap(image,2)) ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", "UnableToThresholdImage"); if (pixel.red == 0) (void) GetImageDynamicThreshold(image,2.0,2.0,&pixel,&image->exception); } for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (IsMagickGray(&pixel) != MagickFalse) for (x=0; x < (ssize_t) 
image->columns; x++) { index=(IndexPacket) (GetPixelIntensity(image,q) <= pixel.red ? 0 : 1); SetPixelIndex(indexes+x,index); SetPixelRed(q,image->colormap[(ssize_t) index].red); SetPixelGreen(q,image->colormap[(ssize_t) index].green); SetPixelBlue(q,image->colormap[(ssize_t) index].blue); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,(MagickRealType) q->red <= pixel.red ? 0 : QuantumRange); SetPixelGreen(q,(MagickRealType) q->green <= pixel.green ? 0 : QuantumRange); SetPixelBlue(q,(MagickRealType) q->blue <= pixel.blue ? 0 : QuantumRange); SetPixelOpacity(q,(MagickRealType) q->opacity <= pixel.opacity ? 0 : QuantumRange); q++; } if (!SyncAuthenticPixels(image,&image->exception)) break; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a n s f o r m C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformColorspace() converts the image to a specified colorspace. % If the image is already in the requested colorspace, no work is performed. % Note that the current colorspace is stored in the image colorspace member. % The transformation matrices are not necessarily the standard ones: the % weights are rescaled to normalize the range of the transformed values to % be [0..QuantumRange]. % % Deprecated, replace with: % % TransformImageColorspace(image,colorspace); % % The format of the TransformColorspace method is: % % unsigned int (void) TransformColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image to transform % % o colorspace: the desired colorspace. 
% */ MagickExport unsigned int TransformColorspace(Image *image, const ColorspaceType colorspace) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6"); return(TransformImageColorspace(image,colorspace)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m H S L % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformHSL() converts a (red, green, blue) to a (hue, saturation, % lightness) triple. % % The format of the TransformHSL method is: % % void TransformHSL(const Quantum red,const Quantum green, % const Quantum blue,double *hue,double *saturation,double *lightness) % % A description of each parameter follows: % % o red, green, blue: A Quantum value representing the red, green, and % blue component of a pixel.. % % o hue, saturation, lightness: A pointer to a double value representing a % component of the HSL color space. % */ MagickExport void TransformHSL(const Quantum red,const Quantum green, const Quantum blue,double *hue,double *saturation,double *lightness) { MagickRealType b, delta, g, max, min, r; /* Convert RGB to HSL colorspace. */ assert(hue != (double *) NULL); assert(saturation != (double *) NULL); assert(lightness != (double *) NULL); r=QuantumScale*red; g=QuantumScale*green; b=QuantumScale*blue; max=MagickMax(r,MagickMax(g,b)); min=MagickMin(r,MagickMin(g,b)); *hue=0.0; *saturation=0.0; *lightness=(double) ((min+max)/2.0); delta=max-min; if (delta == 0.0) return; *saturation=(double) (delta/((*lightness < 0.5) ? (min+max) : (2.0-max-min))); if (r == max) *hue=(double) (g == min ? 5.0+(max-b)/delta : 1.0-(max-g)/delta); else if (g == max) *hue=(double) (b == min ? 1.0+(max-r)/delta : 3.0-(max-b)/delta); else *hue=(double) (r == min ? 
3.0+(max-g)/delta : 5.0-(max-r)/delta); *hue/=6.0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s l a t e T e x t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TranslateText() replaces any embedded formatting characters with the % appropriate image attribute and returns the translated text. % % Deprecated, replace with: % % InterpretImageProperties(image_info,image,embed_text); % % The format of the TranslateText method is: % % char *TranslateText(const ImageInfo *image_info,Image *image, % const char *embed_text) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o embed_text: the address of a character string containing the embedded % formatting characters. % */ MagickExport char *TranslateText(const ImageInfo *image_info,Image *image, const char *embed_text) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.6"); return(InterpretImageProperties(image_info,image,embed_text)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
% % The format of the TransparentImage method is: % % MagickBooleanType TransparentImage(Image *image, % const PixelPacket target,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o target: the RGB value of the target color. % % o opacity: the replacement opacity value. % */ MagickExport MagickBooleanType TransparentImage(Image *image, const PixelPacket target,const Quantum opacity) { #define TransparentImageTag "Transparent/Image" MagickBooleanType proceed; ssize_t y; /* Make image color transparent. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0"); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsColorSimilar(image,q,&target) != MagickFalse) q->opacity=opacity; q++; } if (SyncAuthenticPixels(image,&image->exception) == MagickFalse) break; proceed=SetImageProgress(image,TransparentImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n s h i f t I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnshiftImageList() adds the image to the beginning of the list. 
% % Deprecated, replace with: % % PrependImageToList(images,CloneImageList(image,exception)); % % The format of the UnshiftImageList method is: % % unsigned int UnshiftImageList(Image *images,const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport unsigned int UnshiftImageList(Image **images,const Image *image, ExceptionInfo *exception) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); PrependImageToList(images,CloneImageList(image,exception)); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + V a l i d a t e C o l o r m a p I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ValidateColormapIndex() validates the colormap index. If the index does % not range from 0 to the number of colors in the colormap an exception % issued and 0 is returned. % % Deprecated, replace with: % % ConstrainColormapIndex(image,index); % % The format of the ValidateColormapIndex method is: % % IndexPacket ValidateColormapIndex(Image *image,const unsigned int index) % % A description of each parameter follows: % % o index: Method ValidateColormapIndex returns colormap index if it is % valid other an exception issued and 0 is returned. % % o image: the image. % % o index: This integer is the colormap index. 
%
*/
MagickExport IndexPacket ValidateColormapIndex(Image *image,
  const size_t index)
{
  /*
    Deprecated thin wrapper around ConstrainColormapIndex().
    NOTE(review): unlike the other deprecated wrappers in this file, the
    deprecation event is only logged when image->debug is set -- confirm
    whether that is intentional.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.4");
  return(ConstrainColormapIndex(image,index));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Z o o m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZoomImage() creates a new image that is a scaled size of an existing one.
%  It allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.  The Point filter gives fast pixel replication,
%  Triangle is equivalent to bi-linear interpolation, and Mitchell gives
%  slower, very high-quality results.  See Graphic Gems III for details on
%  this algorithm.
%
%  The filter member of the Image structure specifies which image filter to
%  use.  Blur specifies the blur factor where > 1 is blurry, < 1 is sharp.
%
%  The format of the ZoomImage method is:
%
%      Image *ZoomImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: An integer that specifies the number of columns in the zoom
%      image.
%
%    o rows: An integer that specifies the number of rows in the scaled
%      image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ZoomImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  Image
    *zoom_image;

  /*
    Deprecated wrapper: delegate entirely to ResizeImage() using the
    image's own filter and blur settings.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  zoom_image=ResizeImage(image,columns,rows,image->filter,image->blur,
    exception);
  return(zoom_image);
}
#endif
2278.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { #pragma omp target teams distribute schedule(static, 2) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp target teams distribute schedule(static, 2) for (i = 0; i < _PB_N; i++) { #pragma omp target teams distribute schedule(static, 2) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. 
*/ #pragma omp target teams distribute schedule(static, 2) for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp target teams distribute schedule(static, 2) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
ast-dump-openmp-taskloop.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp taskloop for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp taskloop for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp taskloop collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp taskloop collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp taskloop collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-taskloop.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTaskLoopDirective {{.*}} <line:4:1, col:21> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr 
{{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 
'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTaskLoopDirective {{.*}} <line:10:1, col:21> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr 
{{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 
'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTaskLoopDirective {{.*}} <line:17:1, col:33> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:22, col:32> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:31> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:31> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | 
|-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 
'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTaskLoopDirective {{.*}} <line:24:1, col:33> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:22, col:32> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:31> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:31> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} 
<line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // 
CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 
'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTaskLoopDirective {{.*}} <line:31:1, col:33> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:22, col:32> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:31> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:31> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: 
| | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | 
`-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .ub. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .st. 'const long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .liter. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .reductions. 
'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-taskloop.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
GB_unop__lgamma_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__lgamma_fp64_fp64)
// op(A') function:  GB (_unop_tran__lgamma_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = lgamma (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: z = lgamma (x)
#define GB_OP(z, x) \
    z = lgamma (x) ;

// casting (no-op here: A and C are both double)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = lgamma (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LGAMMA || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = lgamma(x) to each of the anz entries, in parallel over
// nthreads.  Ab is NULL when all entries are present; otherwise it is the
// bitmap pattern and absent entries are skipped.
GrB_Info GB (_unop_apply__lgamma_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: straight elementwise pass over Ax
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = lgamma (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = lgamma (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself is in the shared template GB_unop_transpose.c,
// which expands in terms of the GB_* macros defined above.
GrB_Info GB (_unop_tran__lgamma_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
main.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = 20; omp_set_num_threads(Nthreads); //seed value for the randomizer double seed = clock(); //this will make your program run differently everytime //double seed = 0; //uncomment this and your program will behave the same everytime it's run srand(seed); //declare storage for an ElGamal cryptosytem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n; printf("Enter a number of bits: "); fflush(stdout); char status = scanf("%u",&n); //make sure the input makes sense if ((n<9)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt =((n-1)/8) ; padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cyrptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Decrypt the Zmessage with the ElGamal cyrptographic system 
ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); volatile int found = 1; #pragma omp parallel for shared(found) for (unsigned int i=0;i<p-1;i++) { if(found == 1){ continue; } if (modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i); found = 0; } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); return 0; }
trsm_x_dia_u_hi_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Triangular solve for a DIA-format matrix (unit-diagonal, upper triangle,
 * row-major multi-RHS): computes y = alpha * inv(A) * x column by column,
 * where only the strictly-upper diagonals of A participate (the "u" in the
 * kernel name means the main diagonal is taken as 1).
 *
 * alpha   - scalar applied to x
 * A       - sparse matrix in DIA storage (values, distance[], lval)
 * x       - right-hand sides, leading dimension ldx
 * columns - number of right-hand-side columns
 * y       - solution, leading dimension ldy
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = A->rows;
    ALPHA_INT main_diag_pos = 0;
    int num_thread = alpha_get_thread_num();

    // Locate the main diagonal (distance == 0).  This was previously an
    // "omp parallel for" loop, which made several threads write the shared
    // main_diag_pos for a scan that is trivially cheap; a serial loop with
    // an early break is both correct by construction and faster.
    for (ALPHA_INT i = 0; i < A->ndiag; i++)
    {
        if (A->distance[i] == 0)
        {
            main_diag_pos = i;
            break;
        }
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        // Back substitution: within a column the rows must be visited
        // bottom-up (row r depends on rows > r); distinct RHS columns are
        // independent, which is why only the outer loop is parallel.
        for (ALPHA_INT r = m - 1; r >= 0; r--)
        {
            ALPHA_Number temp;
            alpha_setzero(temp);
            // accumulate A[r, ac] * y[ac] over the strictly-upper diagonals
            for (ALPHA_INT ndiag = main_diag_pos + 1; ndiag < A->ndiag; ndiag++)
            {
                if (m - A->distance[ndiag] > r)   // keep ac = r + distance inside [0, m)
                {
                    ALPHA_INT ac = r + A->distance[ndiag];
                    alpha_madde(temp, A->values[ndiag * A->lval + r], y[ac * ldy + out_y_col]);
                }
            }
            // unit diagonal: y[r] = alpha * x[r] - temp
            ALPHA_Number t;
            alpha_setzero(t);
            alpha_mul(t, alpha, x[r * ldx + out_y_col]);
            alpha_sub(y[r * ldy + out_y_col], t, temp);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
lowp_SpatialMaxPooling.c
#include <math.h>
#include "../thnets.h"

/* Max-pool one sample: for each of nslices channel planes of size
 * iheight x iwidth, slide a kW x kH window with strides dW/dH and padding
 * padW/padH, writing the window maximum into the owidth x oheight output
 * plane.  Data is 8-bit unsigned ("lowp"), so 0 is a safe identity for max
 * and windows that fall entirely in padding produce 0. */
static void Lowp_SpatialMaxPooling_updateOutput_frame(unsigned char *input_p, unsigned char *output_p,
	long nslices,
	long iwidth, long iheight,
	long owidth, long oheight,
	int kW, int kH, int dW, int dH,
	int padW, int padH)
{
	long k;
	/* channel planes are independent, so parallelize over k */
#pragma omp parallel for private(k)
	for (k = 0; k < nslices; k++)
	{
		unsigned char *ip = input_p + k*iwidth*iheight;
		unsigned char *op = output_p + k*owidth*oheight;
		long i, j;
		for (i = 0; i < oheight; i++)
		{
			for (j = 0; j < owidth; j++)
			{
				/* window start may be negative because of padding;
				 * clamp both ends to the valid image area */
				long hstart = i * dH - padH;
				long wstart = j * dW - padW;
				long hend = thfminf(hstart + kH, iheight);
				long wend = thfminf(wstart + kW, iwidth);
				hstart = thfmaxf(hstart, 0);
				wstart = thfmaxf(wstart, 0);
				unsigned char maxval = 0;
				long x, y;
				for (y = hstart; y < hend; y++)
				{
					for (x = wstart; x < wend; x++)
					{
						unsigned char val = *(ip + y*iwidth + x);
						if (val > maxval)
							maxval = val;
					}
				}
				*(op + i*owidth + j) = maxval;
			}
		}
	}
}

/* Module entry point: reads the pooling hyper-parameters from module,
 * derives the output geometry (floor or ceil mode), resizes the output
 * tensor and pools each sample of the batch in parallel.  A 3D input is
 * temporarily promoted to 4D (batch of one) and demoted again at the end.
 * Returns module->output.  NOTE(review): data pointers are obtained via
 * THFloatTensor_data and cast to unsigned char* — assumes the "lowp"
 * tensors store quantized bytes in the float tensor's buffer; confirm
 * against the thnets lowp conventions. */
THFloatTensor *Lowp_SpatialMaxPooling_updateOutput(struct module *module, THFloatTensor *input)
{
	int kW = module->SpatialMaxPooling.kW;
	int kH = module->SpatialMaxPooling.kH;
	int dW = module->SpatialMaxPooling.dW;
	int dH = module->SpatialMaxPooling.dH;
	int padW = module->SpatialMaxPooling.padW;
	int padH = module->SpatialMaxPooling.padH;
	int ceil_mode = module->SpatialMaxPooling.ceil_mode;
	THFloatTensor *output = module->output;

	int batch = 1;
	if (input->nDimension == 3)
	{
		/* promote CHW to 1xCHW so the batched path below applies */
		batch = 0;
		THFloatTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
	}
	long batchSize = input->size[0];
	long nslices = input->size[1];
	long iheight = input->size[2];
	long iwidth = input->size[3];
	module->SpatialMaxPooling.iwidth = iwidth;
	module->SpatialMaxPooling.iheight = iheight;
	long oheight;
	long owidth;
	if (ceil_mode)
	{
		oheight = (long)(ceil((float)(iheight - kH + 2*padH) / dH)) + 1;
		owidth = (long)(ceil((float)(iwidth - kW + 2*padW) / dW)) + 1;
	} else {
		oheight = (long)(floor((float)(iheight - kH + 2*padH) / dH)) + 1;
		owidth = (long)(floor((float)(iwidth - kW + 2*padW) / dW)) + 1;
	}
	if (padW || padH)
	{
		// ensure that the last pooling starts inside the image
		if ((oheight - 1)*dH >= iheight + padH)
			--oheight;
		if ((owidth - 1)*dW >= iwidth + padW)
			--owidth;
	}
	THLowpTensor_resize4d(output, batchSize, nslices, oheight, owidth);
	unsigned char *input_data = (unsigned char *)THFloatTensor_data(input);
	unsigned char *output_data = (unsigned char *)THFloatTensor_data(output);
	long p;
	/* samples in the batch are independent */
#pragma omp parallel for private(p)
	for (p = 0; p < batchSize; p++)
		Lowp_SpatialMaxPooling_updateOutput_frame(input_data+p*nslices*iwidth*iheight,
			output_data+p*nslices*owidth*oheight,
			nslices, iwidth, iheight, owidth, oheight,
			kW, kH, dW, dH, padW, padH);
	if (batch == 0)
	{
		/* restore the original 3D view of both tensors */
		THLowpTensor_resize3d(output, nslices, oheight, owidth);
		THLowpTensor_resize3d(input, nslices, iheight, iwidth);
	}
	/* propagate the quantization parameters unchanged */
	output->mult = input->mult;
	output->sub = input->sub;
	return output;
}
layer.h
// == mojo ==================================================================== // // Copyright (c) gnawice@gnawice.com. All rights reserved. // See LICENSE in root folder // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files(the "Software"), // to deal in the Software without restriction, including without // limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, and/or sell copies of the Software, and to permit persons to // whom the Software is furnished to do so, subject to the following // conditions : // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT // OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR // THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
//
// ============================================================================
// layer.h: defines layers for neural network
// ==================================================================== mojo ==

#pragma once

#include <string>
#include <sstream>
#include "core_math.h"
#include "activation.h"

namespace mojo {

#include <windows.h>
/*
double PCFreq = 0.0;
__int64 CounterStart = 0;

void StartCounter()
{
	LARGE_INTEGER li;
	if (!QueryPerformanceFrequency(&li)) return;
	PCFreq = double(li.QuadPart) / 1000.0;
	QueryPerformanceCounter(&li);
	CounterStart = li.QuadPart;
}
double GetCounter()
{
	LARGE_INTEGER li;
	QueryPerformanceCounter(&li);
	return double(li.QuadPart - CounterStart) / PCFreq;
}
*/

#define int2str(a) std::to_string((long long)a)
#define float2str(a) std::to_string((long double)a)
#define bail(txt) {std::cerr << txt; throw;}

//----------------------------------------------------------------------------------------------------------
// B A S E   L A Y E R
//
// all other layers derived from this.  Holds the node activations, optional
// per-node bias, the activation function, and the forward/backward links to
// neighboring layers.  Derived classes implement accumulate_signal (forward)
// and, when training is compiled in, distribute_delta / calculate_dw (backward).
class base_layer
{
protected:
	bool _has_weights;
	bool _use_bias;
	float _learning_factor;
	int _thread_count;

public:
	activation_function *p_act;

	bool has_weights() {return _has_weights;}
	bool use_bias() { return _use_bias; }
	// NOTE(review): the parameter f is ignored — the factor is always reset
	// to 1.f; confirm whether this is intentional.
	void set_learning_factor(float f=1.0f) {_learning_factor = 1.f;}
	void set_threading(int thread_count) {_thread_count=thread_count; if(_thread_count<1) _thread_count=1;}

	int pad_cols, pad_rows;
	matrix node;
	matrix bias; // this is something that maybe should be in the same class as the weights... but whatever. handled differently for different layers
	std::string name;

	// index of W matrix, index of connected layer
	std::vector<std::pair<int,base_layer*>> forward_linked_layers;

#ifndef MOJO_NO_TRAINING
	matrix delta;
	std::vector<std::pair<int,base_layer*>> backward_linked_layers;

	// backward pass: push this layer's delta up through the weights into top
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1) =0;
	// backward pass: weight gradient between this layer and top_layer
	virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train =1)=0;
	virtual void update_bias(const matrix &newbias, float alpha) {};
#endif

	// forward pass: accumulate the weighted signal from the top layer into node
	virtual void accumulate_signal(const base_layer &top_node, const matrix &w, const int train =0) =0;

	base_layer(const char* layer_name, int _w, int _h=1, int _c=1) : node(_w, _h, _c), p_act(NULL), name(layer_name), _has_weights(true), pad_cols(0), pad_rows(0), _learning_factor(1.f), _use_bias(false), _thread_count(1)
#ifndef MOJO_NO_TRAINING
		,delta(_w,_h,_c,NULL,false)
#endif
	{
	}

	// re-allocate node (and bias/delta when present); dimensions clamp to >= 1
	virtual void resize(int _w, int _h=1, int _c=1)
	{
		if (_w<1) _w = 1; if (_h<1) _h = 1; if (_c<1) _c = 1;
		node =matrix(_w,_h,_c);
		if (_use_bias) { bias = matrix(_w, _h, _c); bias.fill(0.); }
#ifndef MOJO_NO_TRAINING
		delta =matrix(_w,_h,_c,NULL,false);
#endif
	}

	virtual ~base_layer(){if(p_act) delete p_act;}

	virtual int fan_size() {return node.chans*node.rows*node.cols;}

	// apply the activation function in place over all nodes (with bias if enabled)
	virtual void activate_nodes()
	{
		if (p_act)
		{
			if(_use_bias)
				//for (int c=0; c<node.chans; c++)
			{
				//const float b = bias.x[c];
				//float *x= &node.x[c*node.chan_stride];
				p_act->f(node.x,node.size(),bias.x);
			}
			else
				p_act->f(node.x, node.size(), 0);
		}
	}

	// wire this layer below 'top'; returns a newly allocated weight matrix
	// (caller owns it), or NULL for weightless layers
	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		top.forward_linked_layers.push_back(std::make_pair((int)weight_mat_index,this));
#ifndef MOJO_NO_TRAINING
		backward_linked_layers.push_back(std::make_pair((int)weight_mat_index,&top));
#endif
		if (_has_weights)
		{
			int rows = node.cols*node.rows*node.chans;
			int cols = top.node.cols*top.node.rows*top.node.chans;
			return new matrix(cols, rows, 1);
		}
		else
			return NULL;
	}

	//inline float f(float *in, int i, int size, float bias) {return p_act->f(in, i, size, bias);};
	inline float df(float *in, int i, int size) { if (p_act) return p_act->df(in, i, size); else return 1.f; };
	virtual std::string get_config_string() =0;
};

//----------------------------------------------------------------------------------------------------------
// I N P U T   L A Y E R
//
// input layer class - can be 1D, 2D (c=1), or stacked 2D (c>1).
// All virtuals are no-ops: the input layer only holds data in 'node'.
class input_layer : public base_layer
{
public:
	input_layer(const char *layer_name, int _w, int _h=1, int _c=1) : base_layer(layer_name,_w,_h,_c) {p_act=new_activation_function("identity"); }
	virtual ~input_layer(){}
	virtual void activate_nodes() { /*node.reset_empty_chans(); */}
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train =1) {}
	virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train =1) {}
	virtual void accumulate_signal(const base_layer &top_node, const matrix &w, const int train =0) {}
	virtual std::string get_config_string() {std::string str="input "+int2str(node.cols)+" "+int2str(node.rows)+" "+int2str(node.chans)+ " "+p_act->name+"\n"; return str;}
};

//----------------------------------------------------------------------------------------------------------
// F U L L Y   C O N N E C T E D
//
// fully connected layer
class fully_connected_layer : public base_layer
{
public:
	fully_connected_layer(const char *layer_name, int _size, activation_function *p) : base_layer(layer_name, _size, 1, 1)
	{
		p_act = p; _use_bias = true;
		bias = matrix(node.cols, node.rows, node.chans);
		bias.fill(0.);
	}//layer_type=fully_connected_type;}
	virtual std::string get_config_string() {std::string str="fully_connected "+int2str(node.size())+ " "+p_act->name+"\n"; return str;}

	// forward: node += W * top.node (handles channel-padded top layouts)
	virtual void accumulate_signal( const base_layer &top,const matrix &w, const int train =0)
	{
		// doesn't care if shape is not 1D
		// here weights are formated in matrix, top node in cols, bottom node along rows. (note that my top is opposite of traditional understanding)
		// node += top.node.dot_1dx2d(w);
		const int s = w.rows;
		const int ts = top.node.size();
		const int ts2 = top.node.cols*top.node.rows;

		// this can be sped up a little with SSE.
		if(top.node.chan_stride!=ts2)  // top channels are padded: dot each channel separately
		{
			//std::cout << "here: " << top.node.chan_stride << ","<< ts2 << ","<< top.node.chans << ":";
			MOJO_THREAD_THIS_LOOP(_thread_count)
			for (int j = 0; j < s; j++)
			{
				for (int i = 0; i < top.node.chans; i++)
				{
					node.x[j] += dot(top.node.x+top.node.chan_stride*i, w.x+j*w.cols+ts2*i, ts2);
					//float *f=top.node.x+top.node.chan_stride*i;
					//if(node.x[j]!=node.x[j])
					// NOTE(review): leftover NaN-hunting debug code — dumps the
					// top layer and hard-exits the process on a NaN activation.
					if(node.x[j]!=node.x[j])
					{
						//std::cout << "stuff" << top.name << " " << name << " " << top.node.x[top.node.chan_stride*i] << " " << w.x[j*w.cols+ts2*i] << " | " ;
						for (int k=0; k<top.node.size(); k++)
						{
							std::cout << k<< ","<< top.node.x[k] <<",";
						}
						exit(1);
					}
				}
			}
		}
		else  // contiguous top: one long dot product per output node
		{
			MOJO_THREAD_THIS_LOOP(_thread_count)
			for (int j = 0; j < s; j++)
				node.x[j] += dot(top.node.x, w.x+j*w.cols, ts);
		}
	}

#ifndef MOJO_NO_TRAINING

	virtual void update_bias(const matrix &newbias, float alpha)
	{
		for (int j = 0; j < bias.size(); j++) bias.x[j] -= newbias.x[j] * alpha;
	}

	// backward: top.delta += W^T * delta (channel-padded tops handled separately)
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train =1)
	{
		if(top.delta.cols*top.delta.rows==top.delta.chan_stride)
		{
			const int w_cols = w.cols;
			for (int b = 0; b < delta.size(); b++)
			{
				const float cb = delta.x[b];
				for (int t = 0; t < top.delta.size(); t++)
					top.delta.x[t] += cb*w.x[t + b*w_cols];
			}
		}
		else
		{
			const int w_cols = w.cols;
			const int chan_size=top.delta.cols*top.delta.rows;
			for (int b = 0; b < delta.size(); b++)
			{
				const float cb = delta.x[b];
				for (int tc = 0; tc < top.delta.chans; tc++)
					for (int t = 0; t < chan_size; t++)
						top.delta.x[t+tc*top.delta.chan_stride] += cb*w.x[t + tc*chan_size + b*w_cols];
			}
		}
	}

	// gradient of the weights: dw = delta (bottom) outer-product top activations
	virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train = 1)
	{
		const float *bottom = delta.x; const int sizeb = delta.size();
		const float *top = top_layer.node.x;
		const int sizet = top_layer.node.cols*top_layer.node.rows*top_layer.node.chans;
		dw.resize(sizet, sizeb, 1);
		for (int b = 0; b < sizeb; b++)
		{
			const float cb = bottom[b];
			const int chan_size = top_layer.node.cols*top_layer.node.rows;
			if(sizet!=top_layer.node.size())  // padded channels in the top layer
			{
				//std::cout << "calculate_dw - odd size";
				for (int tc = 0; tc < top_layer.node.chans; tc++)
					for (int t = 0; t < chan_size; t++)
					{
						dw.x[t+tc*chan_size + b*sizet] = top[t+tc*top_layer.node.chan_stride] * cb;
						//std::cout << dw.x[t+tc*chan_size + b*sizet] <<",";
					}
			}
			else
			{
				for (int t = 0; t < sizet; t++)
					dw.x[t + b*sizet] = top[t] * cb;
			}
		}
	}

#endif
};

//----------------------------------------------------------------------------------------------------------
// M A X   P O O L I N G
//
// may split to max and ave pool class derived from pooling layer.. but i never use ave pool anymore.
// Weightless layer: remembers which input index won each window in _max_map
// so the backward pass can route deltas.
class max_pooling_layer : public base_layer
{
protected:
	int _pool_size;
	int _stride;
	// uses a map to connect pooled result to top layer
	std::vector<int> _max_map;
public:
	max_pooling_layer(const char *layer_name, int pool_size) : base_layer(layer_name, 1)
	{
		_stride = pool_size; _pool_size = pool_size; //layer_type=pool_type;
		_has_weights = false;
	}
	max_pooling_layer(const char *layer_name, int pool_size, int stride ) : base_layer(layer_name, 1)
	{
		_stride= stride; _pool_size=pool_size;  //layer_type=pool_type;
		_has_weights = false;
	}
	virtual  ~max_pooling_layer(){}
	virtual std::string get_config_string() {std::string str="max_pool "+int2str(_pool_size) +" "+ int2str(_stride) +"\n"; return str;}

	// ToDo would like delayed activation of conv layer if available
	//	virtual void activate_nodes(){ return;}
	virtual void resize(int _w, int _h=1, int _c=1)
	{
		if(_w<1) _w=1; if(_h<1) _h=1; if(_c<1) _c=1;
		_max_map.resize(_w*_h*_c);
		base_layer::resize(_w, _h, _c);
	}

	// no weights
	virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train =1) {}

	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		// need to set the size of this layer
		// can really only handle one connection comming in to this
		int pool_size = _pool_size;
		int w = (top.node.cols) / pool_size;
		int h = (top.node.rows) / pool_size;
		if (_stride != _pool_size)
		{
			w = 1 + ((top.node.cols - _pool_size) / _stride);
			h = 1 + ((top.node.rows - _pool_size) / _stride);
		}
		resize(w, h, top.node.chans);
		return base_layer::new_connection(top, weight_mat_index);
	}

	// this is downsampling
	// the pool size must fit correctly in the image map (use resize prior to call if this isn't the case)
	// 2x2, 3x3 and 4x4 windows are hand-unrolled; anything else takes the
	// generic double loop at the end.
	virtual void accumulate_signal(const base_layer &top,const matrix &w,const int train =0)
	{
		int kstep = top.node.chan_stride; // top.node.cols*top.node.rows;
		int jstep=top.node.cols;
		int output_index=0;
		int *p_map = _max_map.data();
		int pool_y=_pool_size; if(top.node.rows==1) pool_y=1; //-top.pad_rows*2==1) pool_y=1;
		int pool_x=_pool_size; if(top.node.cols==1) pool_x=1;//-top.pad_cols*2==1) pool_x=1;
		const float *top_node = top.node.x;

		for(int k=0; k<top.node.chans; k++)
		{
			for(int j=0; j<=top.node.rows- _pool_size; j+= _stride)
			{
				for(int i=0; i<=top.node.cols- _pool_size; i+= _stride)
				{
					const int base_index=i+(j)*jstep+k*kstep;
					int max_i=base_index;
					float max=top_node[base_index];
					if(pool_x==2)
					{
						const float *n=top_node+base_index;
						//if(max<n[0]) { max = n[0]; max_i=max_i;}
						if(max<n[1]) { max = n[1]; max_i=base_index+1;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+jstep+1;}
					}
					else if(pool_x==3)
					{
						const float *n=top_node+base_index;
						//if(max<n[0]) { max = n[0]; max_i=max_i;}
						if(max<n[1]) { max = n[1]; max_i=base_index+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+2;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+jstep+2;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+2*jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+2*jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+2*jstep+2;}
					}
					else if(pool_x==4)
					{
						const float *n=top_node+base_index;
						//if(max<n[0]) { max = n[0]; max_i=max_i;}
						if(max<n[1]) { max = n[1]; max_i=base_index+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+2;}
						if(max<n[3]) { max = n[3]; max_i=base_index+3;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+jstep+2;}
						if(max<n[3]) { max = n[3]; max_i=base_index+jstep+3;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+2*jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+2*jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+2*jstep+2;}
						if(max<n[3]) { max = n[3]; max_i=base_index+2*jstep+3;}
						n+=jstep;
						if(max<n[0]) { max = n[0]; max_i=base_index+3*jstep;}
						if(max<n[1]) { max = n[1]; max_i=base_index+3*jstep+1;}
						if(max<n[2]) { max = n[2]; max_i=base_index+3*jstep+2;}
						if(max<n[3]) { max = n[3]; max_i=base_index+3*jstep+3;}
					}
					else
					{
						// speed up with optimized size version
						for(int jj=0; jj<pool_y; jj+= 1)
						{
							for(int ii=0; ii<pool_x; ii+= 1)
							{
								int index=i+ii+(j+jj)*jstep+k*kstep;
								if((max)<(top_node[index]))
								{
									max = top_node[index];
									max_i=index;
								}
							}
						}
					}
					//if (max<1e-5) node.empty_chan[k] = 1;
					//else node.empty_chan[k] = 0;
					node.x[output_index] = top_node[max_i];
					p_map[output_index] = max_i;
					output_index++;
				}
			}
		}
	}

#ifndef MOJO_NO_TRAINING
	// this is upsampling: route each delta back to the input that won its window
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train =1)
	{
		int *p_map = _max_map.data();
		const int s = (int)_max_map.size();
		for(int k=0; k<s; k++) top.delta.x[p_map[k]]+=delta.x[k];
	}
#endif
};

//----------------------------------------------------------------------------------------------------------
// S E M I   S T O C H A S T I C   P O O L I N G
//
// concept similar to stochastic pooling but only slects 'max' based on top 2 candidates.
// During training the winner is drawn between the two largest window values
// with probability proportional to their magnitudes; at inference it
// degenerates to plain max pooling.
class semi_stochastic_pooling_layer : public max_pooling_layer
{
public:
	semi_stochastic_pooling_layer(const char *layer_name, int pool_size) : max_pooling_layer(layer_name, pool_size) {}
	semi_stochastic_pooling_layer(const char *layer_name, int pool_size, int stride) : max_pooling_layer(layer_name, pool_size, stride){}
	virtual std::string get_config_string() { std::string str = "semi_stochastic_pool " + int2str(_pool_size) + " " + int2str(_stride) + "\n"; return str; }

	virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0)
	{
		int kstep = top.node.cols*top.node.rows;
		int jstep = top.node.cols;
		int output_index = 0;
		int *p_map = _max_map.data();
		int pool_y = _pool_size; if (top.node.rows == 1) pool_y = 1; //-top.pad_rows*2==1) pool_y=1;
		int pool_x = _pool_size; if (top.node.cols == 1) pool_x = 1;//-top.pad_cols*2==1) pool_x=1;
		const float *top_node = top.node.x;

		for (int k = 0; k<top.node.chans; k++)
		{
			for (int j = 0; j <= top.node.rows - _pool_size; j += _stride)
			{
				for (int i = 0; i <= top.node.cols - _pool_size; i += _stride)
				{
					const int base_index = i + (j)*jstep + k*kstep;
					int max_i = base_index;
					float max = top_node[base_index];
					int max2_i = base_index;
					float max2 = max;
					// speed up with optimized size version
					// track the two largest values (max/max2) and their indices
					for (int jj = 0; jj < pool_y; jj += 1)
					{
						for (int ii = 0; ii < pool_x; ii += 1)
						{
							int index = i + ii + (j + jj)*jstep + k*kstep;
							if ((max) < (top_node[index]))
							{
								max2 = max; max2_i = max_i;
								max = top_node[index];
								max_i = index;
							}
							else if ((max2) < (top_node[index]))
							{
								max2 = top_node[index];
								max2_i = index;
							}
						}
					}
					//	if(max<1e-5) node.empty_chan[k] = 1;
					//	else node.empty_chan[k] = 0;
					int r = rand() % 100;
					float denom = (max + max2);
					if (denom == 0)
					{
						node.x[output_index] = top_node[max_i];
						p_map[output_index] = max_i;
					}
					else
					{
						// pick the winner with probability max/(max+max2);
						// inference (train==0) always takes the true max
						int t1 = (int)(100 * max / (max + max2));
						if (r <= t1 || train == 0)
						{
							node.x[output_index] = top_node[max_i];
							p_map[output_index] = max_i;
						}
						else
						{
							node.x[output_index] = top_node[max2_i];
							p_map[output_index] = max2_i;
						}
					}
					output_index++;
				}
			}
		}
	}
};
//---------------------------------------------------------------------------------------------------------- // D R O P O U T // class dropout_layer : public base_layer { float _dropout_rate; //std::map<const base_layer*, matrix> drop_mask; matrix drop_mask; public: dropout_layer(const char *layer_name, float dropout_rate) : base_layer(layer_name, 1) { _has_weights = false; _dropout_rate = dropout_rate; p_act = NULL;// new_activation_function("identity"); } virtual ~dropout_layer() {} virtual std::string get_config_string() { std::string str = "dropout " + float2str(_dropout_rate)+"\n"; return str; } virtual void resize(int _w, int _h = 1, int _c = 1) { if (_w<1) _w = 1; if (_h<1) _h = 1; if (_c<1) _c = 1; drop_mask.resize(_w, _h, _c); base_layer::resize(_w, _h, _c); } // no weights virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train = 1) {} virtual matrix * new_connection(base_layer &top, int weight_mat_index) { resize(top.node.cols, top.node.rows, top.node.chans); return base_layer::new_connection(top, weight_mat_index); } // for dropout... // we know this is called first in the backward pass, and the train will be set to 1 // when that happens the dropouts will be set. // different dropouts for each mininbatch... don't know if that matters... 
virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0) { const float *top_node = top.node.x; const int size = top.node.chans*top.node.rows*top.node.cols; memcpy(node.x, top_node, sizeof(float)*size); // matrix *pmask = &(drop_mask[&top]); matrix *pmask = &drop_mask; if (train) { pmask->fill(1); int k; for (k = 0; k < size; k+=4) // do 4 at a time { int r = rand(); if ((r % 100) <= (_dropout_rate*100.f)) { pmask->x[k] = 0.0; node.x[k] *= 0.5f; }; if (((r >> 1) % 100) <= (_dropout_rate*100.f)) { pmask->x[k + 1] = 0.0; node.x[k + 1] *= 0.5f; } if (((r >> 2) % 100) <= (_dropout_rate*100.f)) { pmask->x[k + 2] = 0.0; node.x[k + 2] *= 0.5f; } if (((r >> 3) % 100) <= (_dropout_rate*100.f)) { pmask->x[k + 3] = 0.0; node.x[k + 3] *= 0.5f; } } int k2 = k - 4; for (k = k2; k < size; k++) { int r = rand(); if ((r % 100) <= (_dropout_rate*100.f)) { pmask->x[k] = 0.0; node.x[k] *= 0.5f; }; } } } #ifndef MOJO_NO_TRAINING virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1) { // delta *= drop_mask[&top]; delta *= drop_mask; top.delta += delta; } #endif }; //---------------------------------------------------------------------------------------------------------- // M F M - M a x F e a t u r e M a p // (A Lightened CNN for Deep Face Representation) http://arxiv.org/pdf/1511.02683.pdf // the parameter passed in is the number of maps pooled class maxout_layer : public base_layer { int _pool; matrix max_map; public: maxout_layer(const char *layer_name, int pool_chans) : base_layer(layer_name, 1) { _pool = pool_chans; if (_pool < 2) _pool = 2; p_act = new_activation_function("identity"); _has_weights = false; } virtual ~maxout_layer() {} virtual std::string get_config_string() { std::string str = "mfm" + int2str(_pool) + "\n"; return str; } virtual void resize(int _w, int _h = 1, int _c = 1) { _c /= _pool; if (_w<1) _w = 1; if (_h<1) _h = 1; if (_c<1) _c = 1; max_map.resize(_w, _h, _c); base_layer::resize(_w, _h, _c); 
} inline float df(float *in, int i, int size) { return 1.; }; virtual void activate_nodes() { return; } // no weights virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train = 1) {} virtual matrix * new_connection(base_layer &top, int weight_mat_index) { // wasteful to add weight matrix (1x1x1), but makes other parts of code more OO // bad will happen if try to put more than one pool layer top.forward_linked_layers.push_back(std::make_pair(weight_mat_index, this)); int w = (top.node.cols) / 1; int h = (top.node.rows) / 1; resize(w, h, top.node.chans); #ifndef MOJO_NO_TRAINING backward_linked_layers.push_back(std::make_pair(weight_mat_index, &top)); #endif return NULL; //return new matrix(1, 1, 1); } // for maxout // we know this is called first in the backward pass, and the train will be set to 1 // when that happens the dropouts will be set. // different dropouts for each mininbatch... don't know if that matters... virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0) { const float *top_node = top.node.x; const int chan_size = top.node.rows*top.node.cols; //const int pool_offset = top.node.chans / _pool; const int s = chan_size*top.node.chans / _pool; if((top.node.chans % _pool) !=0) bail("mfm layer has pool size that is not a multiple of the input channels"); for (int i = 0; i < s; i++) { float max = top.node.x[i]; int maxk = i; for (int k = 1; k < _pool; k++) { if (top.node.x[i + (k*s)] > max) { //node.x[i + c / 2 * chan_size] = max; max = top.node.x[i + (k*s)]; maxk = i + (k*s); // max_map tells which map 0 or 1 when pooling //max_map.x[i + c / 2 * chan_size] = 0; } } node.x[i] = max; max_map.x[i] = (float)maxk; } } #ifndef MOJO_NO_TRAINING virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1) { // const int chan_size = node.cols*node.rows; // const int pool_offset = top.node.chans / _pool; const int chan_size = top.node.rows*top.node.cols; //const int pool_offset = 
top.node.chans / _pool; const int s = chan_size*top.node.chans / _pool; for (int c = 0; c < s; c++) { // for (int k = 0; k < node.cols*node.rows; k++) // { int maxmap = (int)max_map.x[c]; top.delta.x[maxmap] += delta.x[c]; // } } } #endif }; //---------------------------------------------------------------------------------------------------------- // C O N V O L U T I O N // class convolution_layer : public base_layer { int _stride; public: int kernel_rows; int kernel_cols; int maps; //int maps_per_kernel; int kernels_per_map; convolution_layer(const char *layer_name, int _w, int _c, int _s, activation_function *p ) : base_layer(layer_name, _w, _w, _c) { p_act=p; _stride =_s; kernel_rows=_w; kernel_cols=_w; maps=_c;kernels_per_map=0; pad_cols = kernel_cols-1; pad_rows = kernel_rows-1; _use_bias = true; } virtual ~convolution_layer() { } virtual std::string get_config_string() {std::string str="convolution "+int2str(kernel_cols)+" "+int2str(maps)+" " + int2str(_stride) + " " +p_act->name+"\n"; return str;} virtual int fan_size() { return kernel_rows*kernel_cols*maps *kernels_per_map; } virtual void resize(int _w, int _h=1, int _c=1) // special resize nodes because bias handled differently with shared wts { if(kernel_rows*kernel_cols==1) node =matrix(_w,_h,_c); /// use special channel aligned matrix object else node =matrix(_w,_h,_c,NULL,true); /// use special channel aligned matrix object bias =matrix(1,1,_c); bias.fill(0.); #ifndef MOJO_NO_TRAINING if(kernel_rows*kernel_cols==1) delta =matrix(_w,_h,_c); /// use special channel aligned matrix object else delta =matrix(_w,_h,_c,NULL,true); /// use special channel aligned matrix object #endif } // this connection work won't work with multiple top layers (yet) virtual matrix * new_connection(base_layer &top, int weight_mat_index) { top.forward_linked_layers.push_back(std::make_pair(weight_mat_index,this)); #ifndef MOJO_NO_TRAINING backward_linked_layers.push_back(std::make_pair(weight_mat_index,&top)); #endif // 
// (tail of convolution::new_connection) re-shuffle these things so weights of size kernel w,h,kerns - node of size see below
//int total_kernels=top.node.chans*node.chans;
kernels_per_map += top.node.chans;
resize((top.node.cols-kernel_cols)/_stride+1, (top.node.rows-kernel_rows)/_stride+1, maps);
return new matrix(kernel_cols,kernel_rows, maps*kernels_per_map);
}

// activate_nodes
// Apply the activation function (with per-map bias) in place over every output map.
virtual void activate_nodes()
{
	const int map_size = node.rows*node.cols;
	const int map_stride = node.chan_stride;
	const int _maps = maps;
	MOJO_THREAD_THIS_LOOP(_thread_count)
	for (int c=0; c<_maps; c++)
	{
		p_act->fc(&node.x[c*map_stride],map_size,bias.x[c]);
		//if(node.x[c*map_stride]!=node.x[c*map_stride]) bail("activate");
	}
}

// Forward pass: accumulate the convolution of the top (input) layer into this
// layer's node maps. Kernel sizes 2..5 use the SIMD-friendly "unwrap + dotsum"
// path, 1x1 is a scaled accumulation, anything else falls back to the generic
// unwrap_2d_dot. NOTE(review): several locals below (kernel_map_step, _w,
// w_size, top_node_size) are computed but unused in this method.
virtual void accumulate_signal( const base_layer &top, const matrix &w, const int train =0)
{
	const int kstep = top.node.chan_stride;// NOT the same as top.node.cols*top.node.rows;
	const int jstep=top.node.cols;
	//int output_index=0;
	const int kernel_size=kernel_cols*kernel_rows;
	const int kernel_map_step = kernel_size*kernels_per_map;
	const int map_size=node.cols*node.rows;
	const int map_stride = node.chan_stride;
	const float *_w = w.x;
	const int top_chans = top.node.chans;
	const int map_cnt=maps;
	const int w_size = kernel_cols;
	const int stride = _stride;
	const int node_size= node.cols;
	const int top_node_size = top.node.cols;
	const int outsize = node_size*node_size; // assumes square output maps -- TODO confirm
	if(kernel_rows>=2 && (kernel_rows<=5))
	{
		// scratch matrix holding the input re-arranged so each NxN patch is contiguous
		matrix img_ptr(node_size, node_size, kernel_rows*kernel_rows, NULL, true);
		for (int k = 0; k < top_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			unwrap_aligned_NxN(kernel_rows, img_ptr.x, &top.node.x[k*kstep], jstep, stride);
			float *ww = &w.x[(0 + k*maps)*kernel_size];
			if(kernel_rows==2)
			{
				MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
				for (int map = 0; map < map_cnt; map+=1) dotsum_unwrapped_2x2(img_ptr.x, ww+map*kernel_size, node.x + map_stride*map, outsize);
			}
			else if(kernel_rows==3)
			{
				MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
				for (int map = 0; map < map_cnt; map+=1) dotsum_unwrapped_3x3(img_ptr.x, ww+map*kernel_size, node.x + map_stride*map, outsize);
			}
			else if(kernel_rows==4)
			{
				MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
				for (int map = 0; map < map_cnt; map+=1) dotsum_unwrapped_4x4(img_ptr.x, ww+map*kernel_size, node.x + map_stride*map, outsize);
			}
			else //(kernel_rows==5)
			{
				MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
				for (int map = 0; map < map_cnt; map+=1) dotsum_unwrapped_5x5(img_ptr.x, ww+map*kernel_size, node.x + map_stride*map, outsize);
			}
		}
	}
	else if (kernel_rows == 1)
	{
		// 1x1 kernel: plain per-channel scale-and-accumulate.
		// NOTE(review): indexes node.x with map_size*map rather than map_stride*map
		// as the other branches do -- verify behavior when chan_stride != cols*rows.
		for (int k = 0; k < top_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			const float *_top_node = &top.node.x[k*kstep];
			//MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
			for (int map = 0; map < map_cnt; map++)
			{
				const float cw = w.x[(map + k*maps)*kernel_size];
				const int mapoff = map_size*map;
				for (int j = 0; j < node_size*node_size; j += stride) node.x[j + mapoff] += _top_node[j] * cw;
			}
		}
	}
	else
	{
		// generic kernel size: one direct 2-D dot product per output pixel
		for(int map=0; map<maps; map++) // how many maps maps= node.chans
		{
			for(int k=0; k<top_chans; k++) // input channels --- same as kernels_per_map - kern for each input
			{
				MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
				for(int j=0; j<node_size; j+= stride) // input h
					for(int i=0; i<node_size; i+= stride) // intput w
						node.x[i+(j)*node.cols +map_stride*map]+= unwrap_2d_dot( &top.node.x[(i)+(j)*jstep + k*kstep], &w.x[(map+k*maps)*kernel_size], kernel_cols, jstep,kernel_cols);
			} // k
		} // all maps=chans
	}
}

#ifndef MOJO_NO_TRAINING
// convolution::distribute_delta
// Backward pass: push this layer's delta up to the top layer
// (top_delta += delta convolved with the 180-degree-rotated weights).
virtual void distribute_delta(base_layer &top, const matrix &w, const int train=1)
{
	// here to calculate top_delta += bottom_delta * W
	// top_delta.x[s] += bottom_delta.x[t]*w.x[s+t*w.cols];
	matrix delta_pad(delta, pad_cols, pad_rows);
	//const int kstep=top.delta.cols*top.delta.rows;
	const int kstep=top.delta.chan_stride;
	const int jstep=top.delta.cols;
	const int output_index=0;
	const int kernel_size=kernel_cols*kernel_rows;
	const int kernel_map_step =
kernel_size*kernels_per_map;
const int map_size=delta_pad.cols*delta_pad.rows;
const int map_stride=delta_pad.chan_stride;
const float *_w = w.x;
const int w_size = kernel_cols;
const int delta_size = delta_pad.cols;
const int map_cnt=maps;
const int top_delta_size = top.delta.rows;
const int top_delta_chans = top.delta.chans;
const int stride = _stride;
// aligned scratch copy of top.delta so the dotsum_* kernels can run on aligned memory
matrix delt(top.delta.cols, top.delta.rows, top.delta.chans,NULL,true);
if (kernel_cols == 5)
{
	//*
	matrix img_ptr(delta_size, delta_size, 25, NULL, true);
	matrix filter_ptr(28, 1);
	//matrix imgout_ptr(outsize + 7, 1);
	for (int map = 0; map < map_cnt; map+=1) // how many maps maps= node.chans
	{
		unwrap_aligned_NxN(5, img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
		const int outsize = top_delta_size*top_delta_size;
		for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_w = &w.x[(k*maps + map)*kernel_size];
			// flip-flip to make 180 version
			for (int ii = 0; ii < 25; ii++) filter_ptr.x[ii] = _w[24 - ii];
			//float *out = node.x + map_stride*map;
			//float *out = &top.delta.x[k*kstep];
			float *out = &delt.x[k*delt.chan_stride];
			memcpy(out,&top.delta.x[k*kstep],sizeof(float)*outsize);
			dotsum_unwrapped_5x5(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
			memcpy(&top.delta.x[k*kstep],out,sizeof(float)*outsize);
		}
	}
	/*/
	matrix filter_ptr(28, 1);
	matrix img_ptr(28 * delta_size*delta_size, 1);
	matrix imgout_ptr(delta_size*delta_size, 1);
	for (int map = 0; map < map_cnt; map++) // how many maps maps= node.chans
	{
		unwrap_aligned_5x5(img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
		const int outsize = top_delta_size*top_delta_size;
		for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_w = &w.x[(k*maps + map)*kernel_size];
			// flip-flip to make 180 version
			for (int ii = 0; ii < 25; ii++) filter_ptr.x[ii] = _w[24 - ii];
			dot_unwrapped_5x5(img_ptr.x, filter_ptr.x, imgout_ptr.x, outsize);
			float *out = &top.delta.x[k*kstep];
			for (int j = 0; j < outsize; j++) out[j] += imgout_ptr.x[j];
		}
	}
	//*/
	// return;
}
else if(kernel_cols==3)
{
	matrix img_ptr(delta_size, delta_size, 9, NULL, true);
	matrix filter_ptr(9, 1);
	//matrix imgout_ptr(outsize + 7, 1);
	for (int map = 0; map < map_cnt; map+=1) // how many maps maps= node.chans
	{
		unwrap_aligned_NxN(3, img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
		const int outsize = top_delta_size*top_delta_size;
		for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_w = &w.x[(k*maps + map)*kernel_size];
			// flip-flip to make 180 version
			for (int ii = 0; ii < 9; ii++) filter_ptr.x[ii] = _w[8 - ii];
			//float *out = node.x + map_stride*map;
			// float *out = &top.delta.x[k*kstep];
			// dotsum_unwrapped_3x3(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
			float *out = &delt.x[k*delt.chan_stride];
			memcpy(out,&top.delta.x[k*kstep],sizeof(float)*outsize);
			dotsum_unwrapped_3x3(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
			memcpy(&top.delta.x[k*kstep],out,sizeof(float)*outsize);
		}
	}
}
else if (kernel_cols == 2)
{
	matrix img_ptr(delta_size, delta_size, 4, NULL, true);
	matrix filter_ptr(4, 1);
	matrix out_aligned(top_delta_size,top_delta_size,1,NULL,true);
	//matrix imgout_ptr(outsize + 7, 1);
	for (int map = 0; map < map_cnt; map+=1) // how many maps maps= node.chans
	{
		unwrap_aligned_NxN(2, img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
		const int outsize = top_delta_size*top_delta_size;
		for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_w = &w.x[(k*maps + map)*kernel_size];
			// flip-flip to make 180 version
			for (int ii = 0; ii < 4; ii++) filter_ptr.x[ii] = _w[3 - ii];
			memcpy(out_aligned.x, &top.delta.x[k*kstep],outsize*sizeof(float));
			//float *out = node.x + map_stride*map;
			float *out = out_aligned.x;// &top.delta.x[k*kstep];
			dotsum_unwrapped_2x2(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
			memcpy(&top.delta.x[k*kstep],out_aligned.x,outsize*sizeof(float));
		}
	}
}
else if (kernel_cols == 1)
{
	// 1x1: scalar weight per (map,channel); walk maps via pointer strides.
	// NOTE(review): local pointer 'delt' shadows the aligned matrix 'delt' above.
	for (int j = 0; j<top.delta.rows; j += stride) // input h
	{
		for (int i = 0; i<top.delta.cols; i += stride) // intput w
		{
			for (int k = 0; k<top.delta.chans; k++) // input channels --- same as kernels_per_map - kern for each input
			{
				int td_i = i + (j)*jstep + k*kstep;
				float *delt = &delta_pad.x[i + (j)*delta_pad.cols + 0*map_stride];
				float *wx = &w.x[(0 + k*maps)*kernel_size];
				for (int map = 0; map<maps; map++) // how many maps maps= node.chans
				{
					top.delta.x[td_i] += (*delt) * (*wx);
					delt += map_stride;
					wx += kernel_size;
				} // all input chans
				//output_index++;
			}
		}
	} //y
}
else
{
	// generic kernel size: rotated 2-D dot product per pixel
	for(int j=0; j<top.delta.rows; j+=stride) // input h
	{
		for(int i=0; i<top.delta.cols; i+=stride) // intput w
		{
			for(int k=0; k<top.delta.chans; k++) // input channels --- same as kernels_per_map - kern for each input
			{
				int td_i = i+(j)*jstep + k*kstep;
				for(int map=0; map<maps; map++) // how many maps maps= node.chans
				{
					top.delta.x[td_i] += unwrap_2d_dot_rot180( &delta_pad.x[i+(j)*delta_pad.cols + map*map_stride], &w.x[(map+k*maps)*kernel_size], kernel_cols, delta_pad.cols,kernel_cols);
				} // all input chans
				//output_index++;
			}
		}
	} //y
} // all maps=chans
}

// convolution::calculate_dw
// Accumulate the weight gradient: dw += correlation of top-layer activations with delta.
virtual void calculate_dw(const base_layer &top, matrix &dw, const int train =1)
{
	int kstep=top.delta.chan_stride;
	int jstep=top.delta.cols;
	int output_index=0;
	int kernel_size=kernel_cols*kernel_rows;
	int kernel_map_step = kernel_size*kernels_per_map;
	int map_size=delta.cols*delta.rows;
	int map_stride=delta.chan_stride;
	dw.resize(kernel_cols, kernel_rows,kernels_per_map*maps);
	dw.fill(0);
	// node x already init to 0
	output_index=0;
	const int stride = _stride;
	const int top_node_size= top.node.cols;
	const int node_size = node.rows;
	const int delta_size = delta.cols;
	const int kern_len=kernel_cols;
	const float *_top;
// Fully unrolled 5x5 path: one unwrap_2d_dot per kernel weight position.
if(kern_len==5)
{
	for(int map=0; map<maps; map++) // how many maps maps= node.chans
	{
		const float *_delta =&delta.x[map*map_stride];
		for(int k=0; k<top.node.chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_top = &top.node.x[k*kstep];
			const int w_i = (map+k*maps)*kernel_size;
			// kernel row 0
			const float *_t=_top;
			float *_w=dw.x+w_i;
			_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
			// kernel row 1
			_t=_top+jstep;
			_w=dw.x+w_i+kern_len;
			_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
			// kernel row 2
			_t=_top+jstep*2;
			_w=dw.x+w_i+kern_len*2;
			_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
			// kernel row 3
			_t=_top+jstep*3;
			_w=dw.x+w_i+kern_len*3;
			_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
			// kernel row 4
			_t=_top+jstep*4;
			_w=dw.x+w_i+kern_len*4;
			_w[0]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[1]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[2]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[3]+= unwrap_2d_dot( _t++, _delta, node_size,top_node_size, delta_size);
			_w[4]+= unwrap_2d_dot( _t, _delta, node_size,top_node_size, delta_size);
		} //y
	} // all maps=chans
}
// Fully unrolled 3x3 path.
else if(kern_len==3)
{
	for(int map=0; map<maps; map++) // how many maps maps= node.chans
	{
		const float *_delta =&delta.x[map*map_stride];
		for(int k=0; k<top.node.chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_top = &top.node.x[k*kstep];
			const int w_i = (map+k*maps)*kernel_size;
			dw.x[w_i+0+(0)*kern_len]+= unwrap_2d_dot( _top + 0+(0)*jstep, _delta, node_size,top_node_size, delta_size);
			dw.x[w_i+1+(0)*kern_len]+= unwrap_2d_dot( _top + 1+(0)*jstep, _delta, node_size,top_node_size, delta_size);
			dw.x[w_i+2+(0)*kern_len]+= unwrap_2d_dot( _top + 2+(0)*jstep, _delta, node_size,top_node_size, delta_size);
			dw.x[w_i+0+(1)*kern_len]+= unwrap_2d_dot( _top + 0+(1)*jstep, _delta, node_size,top_node_size, delta_size);
			dw.x[w_i+1+(1)*kern_len]+= unwrap_2d_dot( _top + 1+(1)*jstep, _delta, node_size,top_node_size, delta_size);
			dw.x[w_i+2+(1)*kern_len]+= unwrap_2d_dot( _top + 2+(1)*jstep, _delta, node_size,top_node_size, delta_size);
			dw.x[w_i+0+(2)*kern_len]+= unwrap_2d_dot( _top + 0+(2)*jstep, _delta, node_size,top_node_size, delta_size);
			dw.x[w_i+1+(2)*kern_len]+= unwrap_2d_dot( _top + 1+(2)*jstep, _delta, node_size,top_node_size, delta_size);
			dw.x[w_i+2+(2)*kern_len]+= unwrap_2d_dot( _top + 2+(2)*jstep, _delta, node_size,top_node_size, delta_size);
		} //y
	} // all maps=chans
}
// Generic kernel size.
else
{
	for(int map=0; map<maps; map++) // how many maps maps= node.chans
	{
		const float *_delta =&delta.x[map*map_stride];
		for(int k=0; k<top.node.chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_top = &top.node.x[k*kstep];
const int w_i = (map+k*maps)*kernel_size;
// generic kernel: one dot product per weight position
for(int jj=0; jj<kern_len; jj+=1)
{
	for(int ii=0; ii<kern_len; ii+=1)
	{
		dw.x[w_i+ii+(jj)*kern_len]+= unwrap_2d_dot( _top + ii+(jj)*jstep, _delta, node_size,top_node_size, delta_size);
	} // all input chans
} // x
} //y
} // all maps=chans
}
}
#endif
};

//----------------------------------------------------------------------------------------------------------
// D E E P C N E T
//
// 2x2 convolution followed by 2x2 max pool
// odd number should be in-size, then -1 after convolution and divide by 2 for output size
class deepcnet_layer : public base_layer
{
	int _stride;
	matrix conv_delta;          // delta of the pre-pool convolution output
	std::vector<int> _max_map;  // argmax indices recorded by the forward max pool
public:
	int kernel_rows;
	int kernel_cols;
	int maps;
	//int maps_per_kernel;
	int kernels_per_map;
	static const int _pool=2;

	deepcnet_layer(const char *layer_name, int _c, activation_function *p) : base_layer(layer_name, 2, 2, _c)
	{
		p_act = p;
		_stride = 1;
		kernel_rows = 2;
		kernel_cols = 2;
		maps = _c;
		kernels_per_map = 0;
		pad_cols = 1;
		pad_rows = 1;
		_use_bias = true;
	}
	virtual ~deepcnet_layer() {}

	virtual std::string get_config_string()
	{
		std::string str = "deepcnet " + int2str(maps) + " " + p_act->name + "\n";
		return str;
	}
	virtual int fan_size() { return kernel_rows*kernel_cols*maps *kernels_per_map; }

	// special resize nodes because bias handled differently with shared wts
	virtual void resize(int _w, int _h = 1, int _c = 1)
	{
		node = matrix(_w, _h, _c);
		bias = matrix(1, 1, _c);
		bias.fill(0.);
		_max_map.resize(_w*_h*_c);
		conv_delta = matrix(_w*_pool, _h*_pool, maps);
#ifndef MOJO_NO_TRAINING
		delta = matrix(_w, _h, _c, NULL, true);
#endif
	}

	// this connection work won't work with multiple top layers (yet)
	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		top.forward_linked_layers.push_back(std::make_pair(weight_mat_index, this));
#ifndef MOJO_NO_TRAINING
		backward_linked_layers.push_back(std::make_pair(weight_mat_index, &top));
#endif
		// re-shuffle these things so weights of size kernel w,h,kerns - node of size see below
		//int total_kernels=top.node.chans*node.chans;
		kernels_per_map += top.node.chans;
		resize((top.node.cols - 1) / _pool, (top.node.rows - 1) / _pool, maps);
		return new matrix(kernel_cols, kernel_rows, maps*kernels_per_map);
	}

	// activate_nodes
	// Apply the activation function (with per-map bias) in place over every output map.
	virtual void activate_nodes()
	{
		const int map_size = node.rows*node.cols;
		const int map_stride = node.chan_stride;
		const int _maps = maps;
		MOJO_THREAD_THIS_LOOP(_thread_count)
		for (int c=0; c<_maps; c++)
			p_act->fc(&node.x[c*map_stride],map_size,bias.x[c]);
	}

	// Forward pass: 2x2 convolution of the top layer into a per-map sum buffer,
	// followed by a 2x2 max pool into node (argmax positions saved in _max_map).
	virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0)
	{
		const int kstep = top.node.chan_stride;
		const int jstep = top.node.cols;
		//int output_index=0;
		const int kernel_size = kernel_cols*kernel_rows;
		const int kernel_map_step = kernel_size*kernels_per_map;
		const int pool_map_size = node.cols*node.rows;
		const int pool_map_stride = node.chan_stride;
		const float *_w = w.x;
		const int top_chans = top.node.chans;
		const int map_cnt = maps;
		const int w_size = kernel_cols;
		const int stride = _stride;
		const int conv_size = node.cols * _pool;
		const int pool_size = node.cols;
		const int top_node_size = top.node.cols;
		const int outsize = pool_size*pool_size;
		int *p_map = _max_map.data();
		// per-map accumulation buffer for the (jstep-1)x(jstep-1) convolution result
		matrix imgsum_ptr(jstep-1,jstep-1,maps,NULL,true);
		imgsum_ptr.fill(0);
		matrix img_ptr( top.node.cols, top.node.cols, 2*2, NULL, true);
		//#pragma omp parallel for schedule(guided) num_threads(_thread_count)
		for (int k = 0; k < top_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			unwrap_aligned_NxN(2, img_ptr.x, &top.node.x[k*kstep], jstep, 1);
			// MOJO_THREAD_THIS_LOOP_DYNAMIC(_thread_count)
			MOJO_THREAD_THIS_LOOP(_thread_count)
			for (int map = 0; map < map_cnt; map+=1) // how many maps maps= node.chans
			{
				//std::cout << omp_get_thread_num();
				float *out = imgsum_ptr.x + imgsum_ptr.chan_stride*map;
				dotsum_unwrapped_2x2(img_ptr.x, &w.x[(map + k*maps)*kernel_size], out, (jstep-1)*(jstep-1));
			}
		}
		int idx = 0;
		for (int map
= 0; map < map_cnt; map++) // how many maps maps= node.chans
{
	// 2x2 max pool over the convolution buffer; record argmax for backprop
	float *out = node.x + pool_map_stride*map;
	float *sum = imgsum_ptr.x + imgsum_ptr.chan_stride*map;
	int cnt=0;
	for (int j = 0; j < conv_size; j += _pool)
	{
		for (int i = 0; i < conv_size; i += _pool)
		{
			int maxi = i + j*conv_size;
			if (sum[maxi] < sum[i + 1 + j*conv_size]) maxi = i + 1 + j*conv_size;
			if (sum[maxi] < sum[i + (j + 1)*conv_size]) maxi = i + (j + 1)*conv_size;
			if (sum[maxi] < sum[i + 1 + (j + 1)*conv_size]) maxi = i + 1 + (j + 1)*conv_size;
			//const int pool_idx = (i + j * pool_size) / _pool;
			out[cnt] = sum[maxi];
			p_map[idx] = maxi+ conv_size*conv_size*map;
			idx++;
			cnt++;
		}
	}
}
}

#ifndef MOJO_NO_TRAINING
// convolution::distribute_delta
// Backward pass: scatter delta through the recorded max-pool argmaxes into
// conv_delta, then distribute through the 2x2 convolution with rotated weights.
virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1)
{
	// here to calculate top_delta += bottom_delta * W
	// top_delta.x[s] += bottom_delta.x[t]*w.x[s+t*w.cols];
	const int kstep = top.delta.chan_stride;
	const int jstep = top.delta.cols;
	const int output_index = 0;
	const int kernel_size = kernel_cols*kernel_rows;
	const int kernel_map_step = kernel_size*kernels_per_map;
	const float *_w = w.x;
	const int w_size = kernel_cols;
	const int map_cnt = maps;
	const int top_delta_size = top.delta.rows;
	const int top_delta_chans = top.delta.chans;
	const int stride = _stride;
	//mojo::matrix intermediate_delta(delta.cols * 2, delta.rows * 2, delta.chans);
	conv_delta.fill(0);
	int *p_map = _max_map.data();
	const int s = (int)_max_map.size();
	// put the maxpool result
	for (int k = 0; k<s; k++) conv_delta.x[p_map[k]] += delta.x[k];
	// std::cout << "deepc max";
	// for (int i = 0; i < 10; i++) std::cout << delta.x[i] << ",";
	/// std::cout << "topc max";
	// for (int i = 0; i < 10; i++) std::cout << conv_delta.x[i] << ",";
	matrix delta_pad(conv_delta, pad_cols, pad_rows);
	const int map_size = delta_pad.cols*delta_pad.rows;
	const int map_stride = delta_pad.chan_stride;
	const int delta_size = delta_pad.cols;
	matrix img_ptr(delta_size, delta_size, 4, NULL, true);
	matrix filter_ptr(4, 1);
	// aligned scratch copy of top.delta for the aligned 2x2 dotsum kernel
	matrix delt(top.delta.cols, top.delta.rows, top.delta.chans,NULL,true);
	//matrix imgout_ptr(outsize + 7, 1);
	for (int map = 0; map < map_cnt; map+=1) // how many maps maps= node.chans
	{
		unwrap_aligned_NxN(2, img_ptr.x, &delta_pad.x[map*map_stride], delta_size, stride);
		const int outsize = top_delta_size*top_delta_size;
		for (int k = 0; k < top_delta_chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_w = &w.x[(k*maps + map)*kernel_size];
			// flip-flip to make 180 version
			for (int ii = 0; ii < 4; ii++) filter_ptr.x[ii] = _w[3 - ii];
			//float *out = node.x + map_stride*map;
			float *out = &delt.x[k*delt.chan_stride];
			memcpy(out,&top.delta.x[k*kstep],sizeof(float)*outsize);
			dotsum_unwrapped_2x2(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
			memcpy(&top.delta.x[k*kstep],out,sizeof(float)*outsize);
			// float *out = &top.delta.x[k*kstep];
			// dotsum_unwrapped_2x2(img_ptr.x, filter_ptr.x, out, outsize);// imgout_ptr.x, outsize);
		}
	}
}

// convolution::calculate_dw
// Accumulate weight gradients from the pre-pool conv_delta and top activations.
virtual void calculate_dw(const base_layer &top, matrix &dw, const int train = 1)
{
	// NOTE(review): kstep uses cols*rows here while other methods use chan_stride -- verify.
	int kstep = top.delta.cols*top.delta.rows;
	int jstep = top.delta.cols;
	int output_index = 0;
	int kernel_size = kernel_cols*kernel_rows;
	int kernel_map_step = kernel_size*kernels_per_map;
	int map_size = conv_delta.cols*conv_delta.rows;
	dw.resize(kernel_cols, kernel_rows, kernels_per_map*maps);
	dw.fill(0);
	// node x already init to 0
	output_index = 0;
	const int stride = _stride;
	const int top_node_size = top.node.cols;
	const int delta_size = conv_delta.cols;
	const int kern_len = kernel_cols;
	const float *_top;
	for (int map = 0; map<maps; map++) // how many maps maps= node.chans
	{
		const float *_delta = &conv_delta.x[map*map_size];
		for (int k = 0; k<top.node.chans; k++) // input channels --- same as kernels_per_map - kern for each input
		{
			_top = &top.node.x[k*kstep];
			const int w_i = (map + k*maps)*kernel_size;
			for (int jj = 0; jj<kern_len; jj += 1)
			{
				for (int
ii = 0; ii<kern_len; ii += 1)
{
	dw.x[w_i + ii + (jj)*kern_len] += unwrap_2d_dot(_top + ii + (jj)*jstep, _delta, delta_size, top_node_size, delta_size);
} // all input chans
} // x
} //y
} // all maps=chans
}
#endif
};

//----------------------------------------------------------------------------------------------------------
// C O N C A T E N A T I O N  |  R E S I Z E  |  P A D
//
// puts a set of output maps together and pads to the desired size
class concatenation_layer : public base_layer
{
	std::map<const base_layer*, int> layer_to_channel; // name-to-index of layer for layer management
	int _maps;
	mojo::pad_type _pad_type;
public:
	concatenation_layer(const char *layer_name, int _w, int _h, mojo::pad_type p= mojo::zero) : base_layer(layer_name, _w, _h)
	{
		_maps = 0;
		_pad_type = p;
		_has_weights = false;
		p_act = NULL;// new_activation_function("identity");
	}
	virtual ~concatenation_layer() {}

	virtual std::string get_config_string()
	{
		std::string str_p = " zero\n";
		if (_pad_type == mojo::edge) str_p = " edge\n";
		else if (_pad_type == mojo::median_edge) str_p = " median_edge\n";
		std::string str = "concatenate " + int2str(node.cols) + str_p;
		return str;
	}

	// this connection work won't work with multiple top layers (yet)
	virtual matrix * new_connection(base_layer &top, int weight_mat_index)
	{
		//if (layer_to_channel[&top]) bail("layer already addded to pad layer"); //already exists
		layer_to_channel[&top] = _maps;
		_maps += top.node.chans;
		resize(node.cols, node.rows, _maps);
		return base_layer::new_connection(top, weight_mat_index);
	}

	// no weights
	virtual void calculate_dw(const base_layer &top_layer, matrix &dw, const int train = 1) {}

	// Forward: copy the top layer's maps (padded or cropped to this layer's size)
	// into this layer's channel slot for that top layer.
	virtual void accumulate_signal(const base_layer &top, const matrix &w, const int train = 0)
	{
		const float *top_node = top.node.x;
		const int size = node.rows*node.cols;
		int opadx = node.cols - top.node.cols;
		int opady = node.rows - top.node.rows;
		int padx=0, pady=0, padx_ex=0, pady_ex=0;
		if (opadx > 0) padx = opadx/2;
		if (opady > 0) pady = opady/2;
		if (opadx % 2 != 0) { padx_ex = 1; }
		if (opady % 2 != 0) { pady_ex = 1; }
		int map_offset = layer_to_channel[&top];
		if (padx+ padx_ex > 0 || pady+ pady_ex > 0 )
		{
			matrix m = top.node.pad(padx, pady, padx+ padx_ex, pady+pady_ex, _pad_type, _thread_count);
			memcpy(node.x + node.chan_stride*map_offset, m.x, sizeof(float)*m.size());
		}
		else if((node.cols == top.node.cols) && (node.rows == top.node.rows))
		{
			memcpy(node.x + node.chan_stride*map_offset, top.node.x, sizeof(float)*top.node.size());
		}
		else
		{
			// crop
			// NOTE(review): padx/pady are clamped to >=0 above, so dx/dy are always 0
			// here -- was opadx/opady intended? verify crop offsets.
			int dx = abs(padx) / 2;
			int dy = abs(pady) / 2;
			matrix m = top.node.crop(dx, dy, node.cols, node.rows, _thread_count);
			memcpy(node.x + node.chan_stride*map_offset, m.x, sizeof(float)*m.size());
		}
	}

#ifndef MOJO_NO_TRAINING
	// Backward: route this layer's delta slice back to the top layer,
	// cropping or padding to undo the forward copy.
	virtual void distribute_delta(base_layer &top, const matrix &w, const int train = 1)
	{
		int map_offset = layer_to_channel[&top];
		int padx = node.cols - top.node.cols;
		int pady = node.rows - top.node.rows;
		if (padx > 0) padx /= 2;
		if (pady > 0) pady /= 2;
		if (padx > 0 || pady > 0)
		{
			matrix m = delta.get_chans(map_offset, top.delta.chans);
			top.delta += m.crop(padx, pady, top.delta.cols, top.delta.rows);
		}
		else if ((node.cols == top.node.cols) && (node.rows == top.node.rows))
		{
			top.delta += delta.get_chans(map_offset, top.delta.chans);
		}
		else
		{
			matrix m = delta.get_chans(map_offset, top.delta.chans);
			// pad
			int dx = abs(padx) / 2;
			int dy = abs(pady) / 2;
			top.delta += m.pad(dx, dy);
		}
	}
#endif
};

//--------------------------------------------------
// N E W   L A Y E R
//
// Factory: parse a config string into a concrete layer object, e.g.
// "input", "fully_connected","max_pool","convolution","concatination"
base_layer *new_layer(const char *layer_name, const char *config)
{
	std::istringstream iss(config);
	std::string str;
	iss>>str;
	int w,h,c,s;
	if(str.compare("input")==0)
	{
		iss>>w; iss>>h; iss>>c;
		return new input_layer(layer_name, w,h,c);
	}
	else if(str.compare("fully_connected")==0)
	{
		std::string act;
		iss>>c;
		iss>>act;
		return new fully_connected_layer(layer_name, c, new_activation_function(act));
	}
	else if (str.compare("softmax") == 0)
	{
		//std::string act;
		iss >> c;
		//iss >> act;
		return new fully_connected_layer(layer_name, c, new_activation_function("softmax"));
	}
	else if(str.compare("max_pool")==0)
	{
		iss >> c; iss >> s;
		if(s>0 && s<=c) return new max_pooling_layer(layer_name, c, s);
		else return new max_pooling_layer(layer_name, c);
	}
	else if (str.compare("mfm") == 0)
	{
		iss >> c;
		return new maxout_layer(layer_name, c);
	}
	/*
	else if (str.compare("activation") == 0)
	{
		iss >> s;
		return new activation_layer(layer_name, s);
	}
	*/
	else if (str.compare("semi_stochastic_pool") == 0)
	{
		iss >> c; iss >> s;
		if (s>0 && s <= c) return new semi_stochastic_pooling_layer(layer_name, c, s);
		else return new semi_stochastic_pooling_layer(layer_name, c);
	}
	else if (str.compare("deepcnet") == 0)
	{
		std::string act;
		iss >> c;
		iss >> act;
		return new deepcnet_layer(layer_name, c, new_activation_function(act));
	}
	else if(str.compare("convolution")==0)
	{
		std::string act;
		iss>>w;iss>>c; iss >> s;
		iss>>act;
		return new convolution_layer(layer_name, w,c,s, new_activation_function(act));
	}
	else if (str.compare("dropout") == 0)
	{
		float fc;
		iss >> fc;
		return new dropout_layer(layer_name, fc);
	}
	else if((str.compare("resize")==0) || (str.compare("concatenate") == 0))
	{
		std::string pad;
		iss>>w;
		iss >> pad;
		mojo::pad_type p = mojo::zero;
		if (pad.compare("median") == 0) p = mojo::median_edge;
		else if (pad.compare("median_edge") == 0) p = mojo::median_edge;
		else if (pad.compare("edge") == 0) p = mojo::edge;
		return new concatenation_layer(layer_name, w,w, p);
	}
	else
	{
		//fprintf(stderr, "ERROR : layer type not valid: '%s'", str);
		bail("ERROR : layer type not valid: '" + str + "'\n");
	}
	return NULL;
}

} // namespace
/* ==================== sequences.c (start of concatenated file) ==================== */
#include "sequences.h"

/* Report fscanf failures with source location.
   NOTE(review): not wrapped in do { ... } while(0), so using it as the single
   statement of an if/else can change control flow -- verify call sites. */
#define CHECK_FSCANF(x) if(!x) { fprintf(stderr, "fscanf failed on %s:%d\n", __FILE__,__LINE__); }

/* preprocess_db: preprocess the FASTA database named input_filename.
   The preprocessed database filenames start with out_filename
   (.desc = headers, .info = counts, .seq = lengths + packed residues). */
void preprocess_db (char * input_filename, char * out_filename, int n_procs)
{
	unsigned long int sequences_count=0, D=0, disp, i;
	unsigned short int *sequences_lengths=NULL, * title_lengths=NULL, length=0;
	char ** sequences=NULL, **titles=NULL, buffer[BUFFER_SIZE], filename[BUFFER_SIZE], * res, *b=NULL, diff, new_line='\n';
	FILE * sequences_file, *titles_file, *info_file, * bin_file;
	int max_title_length;
	double tick= dwalltime();

	// open database sequence file
	sequences_file = fopen(input_filename,"r");
	if (sequences_file == NULL) { printf("SWIMM: An error occurred while opening input sequence file.\n"); exit(2); }

	// Allocate memory for sequences_lengths array
	// NOTE(review): malloc/realloc results are not checked in this pass -- verify.
	sequences_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int));
	title_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int));

	// First pass: count sequences and record title/sequence lengths
	sequences_count=0;
	res = fgets(buffer,BUFFER_SIZE,sequences_file);
	while (res != NULL) {
		length = 0;
		// read title (may span several BUFFER_SIZE reads until a newline appears)
		while (strrchr(buffer,new_line) == NULL) {
			length += strlen(buffer);
			res = fgets(buffer,BUFFER_SIZE,sequences_file);
		}
		title_lengths[sequences_count] = length + strlen(buffer) + 1;
		// read sequence
		length = 0;
		res = fgets(buffer,BUFFER_SIZE,sequences_file);
		while ((res != NULL) && (buffer[0] != '>')) {
			length += strlen(buffer)-1; // -1 drops the trailing newline
			res = fgets(buffer,BUFFER_SIZE,sequences_file);
		}
		sequences_lengths[sequences_count] = length;
		(sequences_count)++;
		// grow the length arrays in ALLOCATION_CHUNK steps
		if ((sequences_count) % ALLOCATION_CHUNK == 0) {
			sequences_lengths = (unsigned short int *) realloc(sequences_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int));
			title_lengths = (unsigned short int *) realloc(title_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int));
		}
	}

	// Allocate memory for sequences array
	sequences = (char **) malloc(sequences_count*sizeof(char *));
	if (sequences == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); }
	for (i=0; i<sequences_count; i++ ) {
		sequences[i] = (char *) malloc(sequences_lengths[i]*sizeof(char));
		if (sequences[i] == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); }
	}

	// Rewind sequences database file
	rewind(sequences_file);

	// Second pass: load residues into the sequences array
	i = 0;
	res = fgets(buffer,BUFFER_SIZE,sequences_file);
	while (res != NULL) {
		// read title
		while (strrchr(buffer,new_line) == NULL) res = fgets(buffer,BUFFER_SIZE,sequences_file);
		// read sequence
		length = 1;
		res = fgets(buffer,BUFFER_SIZE,sequences_file);
		while ((res != NULL) && (buffer[0] != '>')) {
			//printf("%s %d\n",buffer,strlen(buffer));
			strncpy(sequences[i]+(length-1),buffer,strlen(buffer)-1);
			length += strlen(buffer)-1;
			res = fgets(buffer,BUFFER_SIZE,sequences_file);
		}
		i++;
	}

	// Rewind sequences database file
	rewind(sequences_file);

	// Allocate memory for titles array
	titles = (char **) malloc(sequences_count*sizeof(char *));
	if (titles == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); }
	for (i=0; i<sequences_count; i++ ) {
		titles[i] = (char *) malloc(title_lengths[i]*sizeof(char));
		if (titles[i] == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); }
	}

	// calculate max title length
	max_title_length = 0;
	for (i=0; i<sequences_count ; i++)
		max_title_length = (max_title_length > title_lengths[i] ? max_title_length : title_lengths[i]);

	// free memory
	free(title_lengths);

	// Third pass: read sequence headers
	i = 0;
	res = fgets(buffer,BUFFER_SIZE,sequences_file);
	while (res != NULL) {
		// discard sequences
		while ((res != NULL) && (buffer[0] != '>')) res = fgets(buffer,BUFFER_SIZE,sequences_file);
		if (res != NULL){
			// read header
			length = 1;
			do{
				strncpy(titles[i]+(length-1),buffer,strlen(buffer)-1);
				length += strlen(buffer)-1;
				res = fgets(buffer,BUFFER_SIZE,sequences_file);
			} while (strrchr(buffer,new_line) == NULL);
			// NOTE(review): if fgets returned NULL above, the do/while re-tests a stale
			// buffer; also confirm titles[i][length] stays within title_lengths[i].
			titles[i][length] = '\0';
			i++;
		}
	}

	// Close sequences database file
	fclose(sequences_file);

	// Sort sequence array by length
	sort_sequences(sequences,titles,sequences_lengths, sequences_count, n_procs);

	// Create titles file: this text file contains the sequences description
	sprintf(filename,"%s.desc",out_filename);
	titles_file = fopen(filename,"w");
	if (titles_file == NULL) { printf("SWIMM: An error occurred while opening sequence header file.\n"); exit(2); }
	// write titles
	for (i=0; i<sequences_count ; i++) fprintf(titles_file,"%s\n",titles[i]);
	// close titles file
	fclose(titles_file);
	// NOTE(review): titles[] and titles are never freed in this function -- leak.

	// calculate total number of residues
	#pragma omp parallel for reduction(+:D) num_threads(n_procs)
	for (i=0; i< sequences_count; i++ ) D = D + sequences_lengths[i];

	// transform bidimensional sequence array to a unidimensional one
	b = (char *) malloc(D*sizeof(char));
	if (b == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); }
	disp = 0;
	for (i=0; i< sequences_count; i++ ) {
		memcpy(b+disp,sequences[i],sequences_lengths[i]);
		disp += sequences_lengths[i];
	}

	// Free memory
	for (i=0; i< sequences_count; i++ ) free(sequences[i]);
	free(sequences);

	// preprocess vect sequences DB
	// original alphabet: 'A'..'Z' => preprocessed alphabet: 0..24 (J, O and U are replaced with dummy symbol)
	#pragma omp parallel for private(diff) num_threads(n_procs) schedule(dynamic)
	for (i=0; i< D; i++) {
		b[i] = ((b[i] == 'J') ? DUMMY_ELEMENT : b[i]);
		b[i] = ((b[i] == 'O') ? DUMMY_ELEMENT : b[i]);
		b[i] = ((b[i] == 'U') ? DUMMY_ELEMENT : b[i]);
		// shrink the code space: skip over the removed letters
		diff = 'A';
		diff = (b[i] > 'J' ? diff+1 : diff);
		diff = (b[i] > 'O' ? diff+1 : diff);
		diff = (b[i] > 'U' ? diff+1 : diff);
		b[i] -= diff;
	}

	// Create info file: this file contains sequences count, number of residues and the maximum title length
	sprintf(filename,"%s.info",out_filename);
	info_file = fopen(filename,"w");
	if (info_file == NULL) { printf("SWIMM: An error occurred while opening info file.\n"); exit(2); }
	// Write info
	fprintf(info_file,"%ld %ld %d",sequences_count,D,max_title_length);
	// close info file
	fclose(info_file);

	// Create sequences binary file: this file contains first the sequences lengths and then the preprocessed sequences residues
	sprintf(filename,"%s.seq",out_filename);
	bin_file = fopen(filename,"wb");
	if (bin_file == NULL) { printf("SWIMM: An error occurred while opening sequence file.\n"); exit(2); }
	// Write vectorized sequences lengths
	fwrite(sequences_lengths,sizeof(unsigned short int),sequences_count,bin_file);
	//Write sequences
	fwrite(b,sizeof(char),D,bin_file);
	// Close bin file
	fclose(bin_file);

	// free memory
	free(sequences_lengths);
	free(b);

	printf("\nSWIMM v%s\n\n",VERSION);
	printf("Database file:\t\t\t %s\n",input_filename);
	printf("Database size:\t\t\t%ld sequences (%ld residues) \n",sequences_count,D);
	printf("Preprocessed database name:\t%s\n",out_filename);
	printf("Preprocessing time:\t\t%lf seconds\n\n",dwalltime()-tick);
}

// Load query sequences from a FASTA file into the output-parameter arrays.
void load_query_sequences(char * queries_filename, int execution_mode, char ** ptr_query_sequences, char *** ptr_query_headers,
	unsigned short int **ptr_query_sequences_lengths, unsigned short int **ptr_m, unsigned long int * query_sequences_count,
	unsigned long int * ptr_Q, unsigned int ** ptr_query_sequences_disp, int n_procs)
{
	long int i;
	unsigned long int sequences_count=0, Q=0, disp;
	unsigned int * sequences_disp;
	unsigned short int
*sequences_lengths, *m, * title_lengths, *tmp, length=0; char ** sequences=NULL, **titles, buffer[BUFFER_SIZE], * res, *a, diff, new_line='\n'; FILE * sequences_file; // open query sequence filename sequences_file = fopen(queries_filename,"r"); if (sequences_file == NULL) { printf("SWIMM: An error occurred while opening input sequence file.\n"); exit(2); } // Allocate memory for sequences_lengths array sequences_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int)); title_lengths = (unsigned short int *) malloc (ALLOCATION_CHUNK*sizeof(unsigned short int)); // Calculate number of sequences in database and its lengths sequences_count=0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while (res != NULL) { length = 0; // read title while (strrchr(buffer,new_line) == NULL) { length += strlen(buffer); res = fgets(buffer,BUFFER_SIZE,sequences_file); } title_lengths[sequences_count] = length + strlen(buffer) + 1; // read sequence length = 0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while ((res != NULL) && (buffer[0] != '>')) { length += strlen(buffer)-1; res = fgets(buffer,BUFFER_SIZE,sequences_file); } sequences_lengths[sequences_count] = length; (sequences_count)++; if ((sequences_count) % ALLOCATION_CHUNK == 0) { sequences_lengths = (unsigned short int *) realloc(sequences_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int)); title_lengths = (unsigned short int *) realloc(title_lengths,((sequences_count)+ALLOCATION_CHUNK)*sizeof(unsigned short int)); } } // copy lengths to aligned buffer tmp = sequences_lengths; m = (unsigned short int *) _mm_malloc (sequences_count*sizeof(unsigned short int), (execution_mode == CPU_ONLY ? 32 : 64)); sequences_lengths = (unsigned short int *) _mm_malloc (sequences_count*sizeof(unsigned short int), (execution_mode == CPU_ONLY ? 
32 : 64)); memcpy(m,tmp,sequences_count*sizeof(unsigned short int)); memcpy(sequences_lengths,tmp,sequences_count*sizeof(unsigned short int)); free(tmp); // Allocate memory for sequences array sequences = (char **) malloc(sequences_count*sizeof(char *)); if (sequences == NULL) { printf("SWIMM: An error occurred while allocating memory for query sequences.\n"); exit(1); } for (i=0; i<sequences_count; i++ ) { sequences[i] = (char *) malloc(sequences_lengths[i]*sizeof(char)); if (sequences[i] == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } } // Rewind sequences database file rewind(sequences_file); // Read sequences from the database file and load them in sequences array i = 0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while (res != NULL) { // read title while (strrchr(buffer,new_line) == NULL) res = fgets(buffer,BUFFER_SIZE,sequences_file); // read sequence length = 1; res = fgets(buffer,BUFFER_SIZE,sequences_file); while ((res != NULL) && (buffer[0] != '>')) { //printf("%s %d\n",buffer,strlen(buffer)); strncpy(sequences[i]+(length-1),buffer,strlen(buffer)-1); length += strlen(buffer)-1; res = fgets(buffer,BUFFER_SIZE,sequences_file); } i++; } // Rewind sequences database file rewind(sequences_file); // Allocate memory for titles array titles = (char **) malloc(sequences_count*sizeof(char *)); if (titles == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); } for (i=0; i<sequences_count; i++ ) { titles[i] = (char *) malloc(title_lengths[i]*sizeof(char)); if (titles[i] == NULL) { printf("SWIMM: An error occurred while allocating memory for sequence titles.\n"); exit(1); } } i = 0; res = fgets(buffer,BUFFER_SIZE,sequences_file); while (res != NULL) { // discard sequences while ((res != NULL) && (buffer[0] != '>')) res = fgets(buffer,BUFFER_SIZE,sequences_file); if (res != NULL){ // read header length = 1; do{ strncpy(titles[i]+(length-1),buffer,strlen(buffer)-1); length += 
strlen(buffer)-1; res = fgets(buffer,BUFFER_SIZE,sequences_file); } while (strrchr(buffer,new_line) == NULL); titles[i][length] = '\0'; i++; } } // Close sequences database file fclose(sequences_file); // Sort sequence array by length sort_sequences(sequences,titles,sequences_lengths, sequences_count, n_procs); // make sequences length even for CPU and Hybrid computing if (execution_mode == MIC_ONLY){ // calculate total number of residues #pragma omp parallel for reduction(+:Q) num_threads(n_procs) for (i=0; i< sequences_count; i++ ) Q = Q + sequences_lengths[i]; *ptr_Q = Q; a = (char *) _mm_malloc(Q*sizeof(char), 64); if (a == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); } disp = 0; for (i=0; i< sequences_count; i++ ) { // copy query sequence memcpy(a+disp,sequences[i],sequences_lengths[i]); disp += sequences_lengths[i]; } } else { // calculate total number of residues #pragma omp parallel for reduction(+:Q) num_threads(n_procs) for (i=0; i< sequences_count; i++ ) Q = Q + sequences_lengths[i] + (sequences_lengths[i]%2); *ptr_Q = Q; a = (char *) _mm_malloc(Q*sizeof(char), (execution_mode == CPU_ONLY ? 32 : 64)); if (a == NULL) { printf("SWIMM: An error occurred while allocating memory for sequences.\n"); exit(1); } disp = 0; for (i=0; i< sequences_count; i++ ) { // copy query sequence memcpy(a+disp,sequences[i],sequences_lengths[i]); // if length is odd then make it even and copy dummy element at last position if (sequences_lengths[i]%2==1){ a[disp+sequences_lengths[i]]=DUMMY_ELEMENT; m[i]++; } disp += m[i]; } } // process vect sequences DB #pragma omp parallel for private(diff) num_threads(n_procs) schedule(dynamic) for (i=0; i< Q; i++) { a[i] = ((a[i] == 'J') ? DUMMY_ELEMENT : a[i]); a[i] = ((a[i] == 'O') ? DUMMY_ELEMENT : a[i]); a[i] = ((a[i] == 'U') ? DUMMY_ELEMENT : a[i]); diff = 'A'; diff = (a[i] > 'J' ? diff+1 : diff); diff = (a[i] > 'O' ? diff+1 : diff); diff = (a[i] > 'U' ? 
diff+1 : diff); a[i] -= diff; } // Calculate displacement for current sequences db sequences_disp = (unsigned int *) _mm_malloc((sequences_count+1)*sizeof(unsigned int), (execution_mode == CPU_ONLY ? 32 : 64)); sequences_disp[0] = 0; for (i=1; i < sequences_count+1; i++) sequences_disp[i] = sequences_disp[i-1] + m[i-1]; *ptr_query_sequences = a; *ptr_query_sequences_lengths = sequences_lengths; *ptr_m = m; *ptr_query_sequences_disp = sequences_disp; *ptr_query_headers = titles; *query_sequences_count = sequences_count; // Free memory for (i=0; i< sequences_count; i++ ) free(sequences[i]); free(sequences); free(title_lengths); } void assemble_multiple_chunks_db (char * sequences_filename, int vector_length, unsigned long int max_chunk_size, unsigned long int * sequences_count, unsigned long int * D, unsigned short int * sequences_db_max_length, int * max_title_length, unsigned long int * vect_sequences_count, unsigned long int * vD, char ***ptr_chunk_vect_sequences_db, unsigned int * chunk_count, unsigned int ** ptr_chunk_vect_sequences_db_count, unsigned long int ** ptr_chunk_vD, unsigned short int *** ptr_chunk_vect_sequences_db_lengths, unsigned int *** ptr_chunk_vect_sequences_db_disp, int n_procs) { char ** sequences, *s, **chunk_vect_sequences_db, filename[200], *b; unsigned short int ** chunk_vect_sequences_db_lengths, * sequences_lengths, * vect_sequences_lengths; unsigned long int i, ii, j, jj, k, * chunk_vD, accum, aux_vD=0, offset, chunk_size, * vect_sequences_disp; unsigned int * chunk_vect_sequences_count, **chunk_vect_sequences_disp, c; FILE * sequences_file, * info_file; // Open info file sprintf(filename,"%s.info",sequences_filename); info_file = fopen(filename,"r"); if (info_file == NULL) { printf("SWIMM: An error occurred while opening info file.\n"); exit(2); } CHECK_FSCANF(fscanf(info_file,"%ld %ld %d",sequences_count,D,max_title_length)); fclose(info_file); // Open sequences file sprintf(filename,"%s.seq",sequences_filename); sequences_file = 
fopen(filename,"r"); if (sequences_file == NULL) { printf("SWIMM: An error occurred while opening info file.\n"); exit(2); } // Read sequences lengths sequences_lengths = (unsigned short int *) malloc((*sequences_count)*sizeof(unsigned short int)); fread(sequences_lengths,sizeof(unsigned short int),*sequences_count,sequences_file); // Read sequences s = (char *) malloc((*D)*sizeof(char)); fread(s,sizeof(char),*D,sequences_file); fclose(sequences_file); sequences = (char **) malloc((*sequences_count)*sizeof(char *)); sequences[0] = s; for (i=1; i<*sequences_count ; i++) sequences[i] = sequences[i-1] + sequences_lengths[i-1]; // calculate vect_sequences_count *vect_sequences_count = ceil( (double) (*sequences_count) / (double) vector_length); // Allocate memory for vect_sequences_lengths vect_sequences_lengths = (unsigned short int *) malloc((*vect_sequences_count)*sizeof(unsigned short int)); if (vect_sequences_lengths == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } vect_sequences_disp = (unsigned long int *) malloc((*vect_sequences_count+1)*sizeof(unsigned long int)); if (vect_sequences_disp == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } // calculate values for vect_sequences_lengths array for (i=0; i< *vect_sequences_count - 1; i++ ) vect_sequences_lengths[i] = sequences_lengths[(i+1)*vector_length-1]; vect_sequences_lengths[*vect_sequences_count-1] = sequences_lengths[*sequences_count-1]; // make length multiple of SEQ_LEN_MULT to allow manual loop unrolling for (i=0; i< *vect_sequences_count; i++ ) vect_sequences_lengths[i] = ceil( (double) vect_sequences_lengths[i] / (double) SEQ_LEN_MULT) * SEQ_LEN_MULT; // Calculate displacement for current sequences db vect_sequences_disp[0] = 0; for (k=1; k < *vect_sequences_count+1; k++) vect_sequences_disp[k] = vect_sequences_disp[k-1] + (vect_sequences_lengths[k-1]*vector_length); #pragma omp parallel for reduction(+:aux_vD) num_threads(n_procs) 
for (i=0; i< *vect_sequences_count; i++ ) aux_vD = aux_vD + vect_sequences_lengths[i]*vector_length; *vD = aux_vD; b = (char *) _mm_malloc((*vD)*sizeof(char),16); // Copy sequences db to host buffers reordering elements to get better locality when computing alignments for (i=0; i < *vect_sequences_count-1; i++) { for (j=0; j< vect_sequences_lengths[i]; j++ ) { for (k=0;k< vector_length; k++) if (j < sequences_lengths[i*vector_length+k]) *(b+vect_sequences_disp[i]+(j*vector_length)+k) = sequences[i*vector_length+k][j]; else *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT; } } //rest = sequences_count % vector_length; for (i=*vect_sequences_count-1, j=0; j< vect_sequences_lengths[i]; j++ ) { for (k=0;k< vector_length; k++) if (i*vector_length+k < *sequences_count){ if (j < sequences_lengths[i*vector_length+k]) *(b+vect_sequences_disp[i]+(j*vector_length)+k) = sequences[i*vector_length+k][j]; else *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT; } else *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT; } // calculate chunks *chunk_count = 1; chunk_vect_sequences_count = (unsigned int *) malloc((*chunk_count)*sizeof(unsigned int)); if (chunk_vect_sequences_count == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } i = 0; c = 0; while (i< *vect_sequences_count) { // group sequences till reach max chunk size j = 0; chunk_size = 0; accum = vect_sequences_lengths[i]*vector_length*sizeof(char) + sizeof(unsigned short int) + sizeof(unsigned int); // secuencias + longitud + desplazamiento while ((i< *vect_sequences_count) && (chunk_size <= max_chunk_size)) { chunk_size += accum; j++; i++; if (i < *vect_sequences_count) accum = vect_sequences_lengths[i]*vector_length*sizeof(char) + sizeof(unsigned short int) + sizeof(unsigned int); // secuencias + longitud + desplazamiento } // number of sequences in chunk chunk_vect_sequences_count[c] = j; // increment 
chunk_count (*chunk_count)++; c++; chunk_vect_sequences_count = (unsigned int *) realloc(chunk_vect_sequences_count,(*chunk_count)*sizeof(unsigned int)); if (chunk_vect_sequences_count == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } } // update chunk count (*chunk_count)--; // calculate chunk_vect_sequences_db_lengths chunk_vect_sequences_db_lengths = (unsigned short int **) _mm_malloc((*chunk_count)*sizeof(unsigned short int *),64); if (chunk_vect_sequences_db_lengths == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } offset = 0; for (i=0; i< *chunk_count ; i++) { chunk_vect_sequences_db_lengths[i] = (unsigned short int *) _mm_malloc((chunk_vect_sequences_count[i])*sizeof(unsigned short int),64); memcpy(chunk_vect_sequences_db_lengths[i],vect_sequences_lengths+offset,(chunk_vect_sequences_count[i])*sizeof(unsigned short int)); offset += chunk_vect_sequences_count[i]; } // calculate chunk_vect_sequences_db_disp accum = 0; chunk_vect_sequences_disp = (unsigned int **) _mm_malloc((*chunk_count)*sizeof(unsigned int *),64); for (i=0; i< *chunk_count ; i++){ chunk_vect_sequences_disp[i] = (unsigned int *) _mm_malloc(chunk_vect_sequences_count[i]*sizeof(unsigned int),64); if (chunk_vect_sequences_disp[i] == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } // adapt sequence displacements to chunk offset = vect_sequences_disp[accum]; for ( j=0, jj=accum; j<chunk_vect_sequences_count[i] ; j++, jj++) chunk_vect_sequences_disp[i][j] = (unsigned int)(vect_sequences_disp[jj] - offset); accum += chunk_vect_sequences_count[i]; } // calculate chunk_vD chunk_vD = (unsigned long int *) malloc((*chunk_count)*sizeof(unsigned long int)); if (chunk_vD == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } offset = 0; for (i=0; i< *chunk_count; i++){ ii = offset + chunk_vect_sequences_count[i]; chunk_vD[i] = vect_sequences_disp[ii] - 
vect_sequences_disp[offset]; offset = ii; } // calculate chunk_vect_sequences_db chunk_vect_sequences_db = (char **) _mm_malloc((*chunk_count)*sizeof(char *), 64); if (chunk_vect_sequences_db == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } offset = 0; for (i=0; i< *chunk_count ; i++) { chunk_vect_sequences_db[i] = b + offset; offset += chunk_vD[i]; } *ptr_chunk_vect_sequences_db = chunk_vect_sequences_db; *ptr_chunk_vect_sequences_db_count = chunk_vect_sequences_count; *ptr_chunk_vD = chunk_vD; *ptr_chunk_vect_sequences_db_lengths = chunk_vect_sequences_db_lengths; *ptr_chunk_vect_sequences_db_disp = chunk_vect_sequences_disp; *sequences_db_max_length = sequences_lengths[*sequences_count-1]; free(s); free(sequences); free(sequences_lengths); free(vect_sequences_lengths); free(vect_sequences_disp); } void assemble_single_chunk_db (char * sequences_filename, int vector_length, unsigned long int * sequences_count, unsigned long int * D, unsigned short int * sequences_db_max_length, int * max_title_length, unsigned long int * vect_sequences_db_count, unsigned long int * vD, char **ptr_vect_sequences_db, unsigned short int ** ptr_vect_sequences_db_lengths, unsigned short int ** ptr_vect_sequences_db_blocks, unsigned long int ** ptr_vect_sequences_db_disp, int n_procs, int block_size) { char ** sequences, *s, filename[200], *b; unsigned short int * vect_sequences_lengths, * vect_sequences_blocks, * sequences_lengths; unsigned long int i, j, k, aux_vD=0, *vect_sequences_disp; FILE * sequences_file, * info_file; // Open info file sprintf(filename,"%s.info",sequences_filename); info_file = fopen(filename,"r"); if (info_file == NULL) { printf("SWIMM: An error occurred while opening info file.\n"); exit(2); } fscanf(info_file,"%ld %ld %d",sequences_count,D,max_title_length); fclose(info_file); // Open sequences file sprintf(filename,"%s.seq",sequences_filename); sequences_file = fopen(filename,"rb"); if (sequences_file == NULL) { 
printf("SWIMM: An error occurred while opening info file.\n"); exit(2); } // Read sequences lengths sequences_lengths = (unsigned short int *) malloc((*sequences_count)*sizeof(unsigned short int)); fread(sequences_lengths,sizeof(unsigned short int),*sequences_count,sequences_file); // Read sequences s = (char *) malloc((*D)*sizeof(char)); fread(s,sizeof(char),*D,sequences_file); fclose(sequences_file); sequences = (char **) malloc((*sequences_count)*sizeof(char *)); sequences[0] = s; for (i=1; i<*sequences_count ; i++) sequences[i] = sequences[i-1] + sequences_lengths[i-1]; // calculate vect_sequences_count *vect_sequences_db_count = ceil( (double) (*sequences_count) / (double) vector_length); // Allocate memory for vect_sequences_lengths vect_sequences_lengths = (unsigned short int *) _mm_malloc((*vect_sequences_db_count)*sizeof(unsigned short int),32); if (vect_sequences_lengths == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } vect_sequences_blocks = (unsigned short int *) _mm_malloc((*vect_sequences_db_count)*sizeof(unsigned short int),32); if (vect_sequences_blocks == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } vect_sequences_disp = (unsigned long int *) _mm_malloc((*vect_sequences_db_count+1)*sizeof(unsigned long int),32); if (vect_sequences_disp == NULL) { printf("SWIMM: An error occurred while allocating memory.\n"); exit(1); } // calculate values for vect_sequences_lengths array for (i=0; i< *vect_sequences_db_count - 1; i++ ) vect_sequences_lengths[i] = sequences_lengths[(i+1)*vector_length-1]; vect_sequences_lengths[*vect_sequences_db_count-1] = sequences_lengths[*sequences_count-1]; // make length multiple of SEQ_LEN_MULT to allow manual loop unrolling for (i=0; i< *vect_sequences_db_count; i++ ) vect_sequences_lengths[i] = ceil( (double) vect_sequences_lengths[i] / (double) SEQ_LEN_MULT) * SEQ_LEN_MULT; // calculate number of blocks for (i=0; i< *vect_sequences_db_count; i++ ) 
vect_sequences_blocks[i] = ceil((double) vect_sequences_lengths[i] / block_size); #pragma omp parallel for reduction(+:aux_vD) num_threads(n_procs) for (i=0; i< *vect_sequences_db_count; i++ ) aux_vD = aux_vD + vect_sequences_lengths[i]*vector_length; *vD = aux_vD; b = (char *) _mm_malloc((*vD)*sizeof(char),32); // Calculate displacement for current sequences db vect_sequences_disp[0] = 0; for (k=1; k < *vect_sequences_db_count+1; k++) vect_sequences_disp[k] = vect_sequences_disp[k-1] + (vect_sequences_lengths[k-1]*vector_length); // Copy sequences db to host buffers reordering elements to get better locality when computing alignments for (i=0; i < *vect_sequences_db_count-1; i++) { for (j=0; j< vect_sequences_lengths[i]; j++ ) { for (k=0;k< vector_length; k++) if (j < sequences_lengths[i*vector_length+k]) *(b+vect_sequences_disp[i]+(j*vector_length)+k) = sequences[i*vector_length+k][j]; else *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT; } } //rest = sequences_count % vector_length; for (i=*vect_sequences_db_count-1, j=0; j< vect_sequences_lengths[i]; j++ ) { for (k=0;k< vector_length; k++) if (i*vector_length+k < *sequences_count){ if (j < sequences_lengths[i*vector_length+k]) *(b+vect_sequences_disp[i]+(j*vector_length)+k) = sequences[i*vector_length+k][j]; else *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT; } else *(b+vect_sequences_disp[i]+(j*vector_length)+k) = PREPROCESSED_DUMMY_ELEMENT; } *ptr_vect_sequences_db = b; *ptr_vect_sequences_db_lengths = vect_sequences_lengths; *ptr_vect_sequences_db_blocks = vect_sequences_blocks; *ptr_vect_sequences_db_disp = vect_sequences_disp; *sequences_db_max_length = sequences_lengths[*sequences_count-1]; free(s); free(sequences); free(sequences_lengths); } void load_database_headers (char * sequences_filename, unsigned long int sequences_count, int max_title_length, char *** ptr_sequences_db_headers) { char ** sequences_db_headers, filename[200], * header; 
FILE * header_file; unsigned long int i; // Load sequence headers // Open header file sprintf(filename,"%s.desc",sequences_filename); header_file = fopen(filename,"r"); if (header_file == NULL) { printf("SWIMM: An error occurred while opening sequence description file.\n"); exit(3); } // Read sequences lengths sequences_db_headers = (char **) malloc(sequences_count*sizeof(char *)); header = (char *) malloc((max_title_length+1)*sizeof(char)); for (i=0; i<sequences_count; i++){ fgets(header,max_title_length,header_file); sequences_db_headers[i] = (char *) malloc((strlen(header)+1)*sizeof(char)); strcpy(sequences_db_headers[i],header); } fclose(header_file); free(header); *ptr_sequences_db_headers = sequences_db_headers; } void merge_sequences(char ** sequences, char ** titles, unsigned short int * sequences_lengths, unsigned long int size) { unsigned long int i1 = 0; unsigned long int i2 = size / 2; unsigned long int it = 0; // allocate memory for temporary buffers char ** tmp1 = (char **) malloc(size*sizeof(char *)); char ** tmp2 = (char **) malloc(size*sizeof(char *)); unsigned short int * tmp3 = (unsigned short int *) malloc (size*sizeof(unsigned short int)); while(i1 < size/2 && i2 < size) { if (sequences_lengths[i1] <= sequences_lengths[i2]) { tmp1[it] = sequences[i1]; tmp2[it] = titles[i1]; tmp3[it] = sequences_lengths[i1]; i1++; } else { tmp1[it] = sequences[i2]; tmp2[it] = titles[i2]; tmp3[it] = sequences_lengths[i2]; i2 ++; } it ++; } while (i1 < size/2) { tmp1[it] = sequences[i1]; tmp2[it] = titles[i1]; tmp3[it] = sequences_lengths[i1]; i1++; it++; } while (i2 < size) { tmp1[it] = sequences[i2]; tmp2[it] = titles[i2]; tmp3[it] = sequences_lengths[i2]; i2++; it++; } memcpy(sequences, tmp1, size*sizeof(char *)); memcpy(titles, tmp2, size*sizeof(char *)); memcpy(sequences_lengths, tmp3, size*sizeof(unsigned short int)); free(tmp1); free(tmp2); free(tmp3); } void mergesort_sequences_serial (char ** sequences, char ** titles, unsigned short int * 
sequences_lengths, unsigned long int size) { char * tmp_seq; unsigned short int tmp_seq_len; if (size == 2) { if (sequences_lengths[0] > sequences_lengths[1]) { // swap sequences tmp_seq = sequences[0]; sequences[0] = sequences[1]; sequences[1] = tmp_seq; // swap titles tmp_seq = titles[0]; titles[0] = titles[1]; titles[1] = tmp_seq; // swap sequences lengths tmp_seq_len = sequences_lengths[0]; sequences_lengths[0] = sequences_lengths[1]; sequences_lengths[1] = tmp_seq_len; return; } } else { if (size > 2){ mergesort_sequences_serial(sequences, titles, sequences_lengths, size/2); mergesort_sequences_serial(sequences + size/2, titles + size/2, sequences_lengths + size/2, size - size/2); merge_sequences(sequences, titles, sequences_lengths, size); } } } void sort_sequences (char ** sequences, char ** titles, unsigned short int * sequences_lengths, unsigned long int size, int threads) { if ( threads == 1) { mergesort_sequences_serial(sequences, titles, sequences_lengths, size); } else if (threads > 1) { #pragma omp parallel sections { #pragma omp section sort_sequences(sequences, titles, sequences_lengths, size/2, threads/2); #pragma omp section sort_sequences(sequences + size/2, titles + size/2, sequences_lengths + size/2, size-size/2, threads-threads/2); } merge_sequences(sequences, titles, sequences_lengths, size); } // threads > 1 }
/* ==== begin concatenated file: GB_binop__isne_int64.c ==== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_01__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_02__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_03__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int64) // A*D function (colscale): GB (_AxD__isne_int64) // D*A function (rowscale): GB (_DxB__isne_int64) // C+=B function (dense accum): GB (_Cdense_accumB__isne_int64) // C+=b function (dense accum): GB (_Cdense_accumb__isne_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int64) // C=scalar+B GB (_bind1st__isne_int64) // C=scalar+B' GB (_bind1st_tran__isne_int64) // C=A+scalar GB (_bind2nd__isne_int64) // C=A'+scalar GB (_bind2nd_tran__isne_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_INT64 || GxB_NO_ISNE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isne_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isne_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
//------------------------------------------------------------------------------
// NOTE(review): machine-generated GraphBLAS kernels for the ISNE binary
// operator (z = (x != y), computed in int64).  Only comments were added in
// this review; every code token is unchanged.  The fragment below is the
// tail of the preceding C=A*D column-scale kernel, whose signature begins
// before this excerpt.
//------------------------------------------------------------------------------
    *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Scales each row of B by the corresponding diagonal entry of D; the scalar
// work is done by the shared rowscale template with Cx typed as int64_t.

GrB_Info GB (_DxB__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isne_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // werk-space slicings; released by GB_FREE_WORK after the template runs
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isne_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isne_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present per the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isne_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x != aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isne_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij != y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the operator/type guard "#if" that protects this generated file
#endif
DRB081-func-arg-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A function argument passed by value should be private inside the function. Variable i is read only. */ #include<stdio.h> #include<assert.h> /* argument pass-by-value */ void f1(int q) { q += 1; } int main() { int i=0; #pragma omp parallel { f1(i); } assert (i==0); printf ("i=%d\n",i); return 0; }
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ``fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ``classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
% % */ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { MagickRealType center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { MagickRealType tau; ssize_t left, right; MagickRealType mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { MagickRealType tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static MagickRealType OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void FreeNodes(IntervalTree *), InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *), ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const MagickRealType cluster_threshold, % const MagickRealType weighting_exponent, % const MagickBooleanType verbose) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. 
%
*/

/*
  Classify() first thresholds every pixel against the RGB extents of the
  clusters discovered in the histogram analysis; any pixel left unclassified
  is then assigned by fuzzy c-means membership.  Returns MagickTrue on
  success, MagickFalse (via ThrowBinaryException) on allocation failure or
  when more than 256 clusters are found.  Parameters are described in the
  comment block above.
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const MagickRealType cluster_threshold,
  const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag  "Segment/Image"

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExceptionInfo
    *exception;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  MagickRealType
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register MagickRealType
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: one candidate cluster for every (red,green,blue) triple
    of histogram peak regions.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  exception=(&image->exception);
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* accumulate the pixel into the first cluster whose RGB extents
         (padded by SafeMargin) contain it */
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): count is reset here and re-used as the number of retained
    clusters, so the threshold test below compares against the retained
    cluster count, not the total pixel count accumulated above -- confirm
    this matches upstream intent.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: finalize id and mean center color.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: bias the table pointer by 255 so it may
    be indexed by channel differences in the range [-255,255].
  */
  squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(MagickRealType) i*(MagickRealType) i;
  /*
    Allocate image colormap: one entry per surviving cluster center.
  */
  if (AcquireImageColormap(image,number_clusters) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do course grain classes.
  */
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *cluster;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(indexes+x,0);
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        if (((ssize_t) ScaleQuantumToChar(q->red) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->red) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(indexes+x,cluster->id);
            break;
          }
      }
      if (cluster == (Cluster *) NULL)
        {
          MagickRealType
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: assign the pixel to the colormap entry
            with maximal membership (minimal weighted distance ratio sum).
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
              (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->green)-
              (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->blue)-
              (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
                (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->green)-
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->blue)-
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
              /* NOTE(review): if the pixel exactly matches colormap entry k,
                 distance_squared is 0 and this divides by zero -- verify
                 against upstream, which guards this case in later releases */
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(indexes+x,j);
              }
          }
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image);
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;  /* undo the +255 bias before freeing the table */
  free_squares=squares;
  free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e   C r o s s i n g s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings, coarsest scale first: each crossing at scale
    i is re-aligned to a crossing position at the finer scale i+1.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
*/ correct=(-1); if (zero_crossing[i+1].crossings[j] != 0) { count=0; for (l=k+1; l < center; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (center != k)) correct=center; } /* Check left for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < left; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (left != k)) correct=left; } /* Check right for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < right; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (right != k)) correct=right; } l=(ssize_t) zero_crossing[i].crossings[j]; zero_crossing[i].crossings[j]=0; if (correct != -1) zero_crossing[i].crossings[correct]=(short) l; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineRegion() defines the left and right boundaries of a peak region. % % The format of the DefineRegion method is: % % ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) % % A description of each parameter follows. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o extents: This pointer to an ExtentPacket represent the extends % of a particular peak or valley of a color component. % */ static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) { /* Initialize to default values. */ extents->left=0; extents->center=0.0; extents->right=255; /* Find the left side (maxima). */ for ( ; extents->index <= 255; extents->index++) if (extrema[extents->index] > 0) break; if (extents->index > 255) return(MagickFalse); /* no left side - no region exists */ extents->left=extents->index; /* Find the right side (minima). 
*/ for ( ; extents->index <= 255; extents->index++) if (extrema[extents->index] < 0) break; extents->right=extents->index-1; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e r i v a t i v e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DerivativeHistogram() determines the derivative of the histogram using % central differencing. % % The format of the DerivativeHistogram method is: % % DerivativeHistogram(const MagickRealType *histogram, % MagickRealType *derivative) % % A description of each parameter follows. % % o histogram: Specifies an array of MagickRealTypes representing the number % of pixels for each intensity of a particular color component. % % o derivative: This array of MagickRealTypes is initialized by % DerivativeHistogram to the derivative of the histogram using central % differencing. % */ static void DerivativeHistogram(const MagickRealType *histogram, MagickRealType *derivative) { register ssize_t i, n; /* Compute endpoints using second order polynomial interpolation. */ n=255; derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]); derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]); /* Compute derivative using central differencing. */ for (i=1; i < n; i++) derivative[i]=(histogram[i+1]-histogram[i-1])/2.0; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e D y n a m i c T h r e s h o l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDynamicThreshold() returns the dynamic threshold for an image. 
% % The format of the GetImageDynamicThreshold method is: % % MagickBooleanType GetImageDynamicThreshold(const Image *image, % const double cluster_threshold,const double smooth_threshold, % MagickPixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % % o pixel: return the dynamic threshold here. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image, const double cluster_threshold,const double smooth_threshold, MagickPixelPacket *pixel,ExceptionInfo *exception) { Cluster *background, *cluster, *object, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickBooleanType proceed; MagickRealType threshold; register const PixelPacket *p; register ssize_t i, x; short *extrema[MaxDimension]; ssize_t count, *histogram[MaxDimension], y; /* Allocate histogram and extrema. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); GetMagickPixelPacket(image,pixel); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } } /* Initialize histogram. */ InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]); /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. 
*/ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ count=0; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p)); cluster->green.center+=(MagickRealType) ScaleQuantumToChar(GetPixelGreen(p)); cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p)); cluster->count++; break; } p++; } proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y, 2*image->rows); if (proceed == MagickFalse) break; } /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } object=head; background=head; if (count > 1) { object=head->next; for (cluster=object; cluster->next != (Cluster *) NULL; ) { if (cluster->count < object->count) object=cluster; cluster=cluster->next; } background=head->next; for (cluster=background; cluster->next != (Cluster *) NULL; ) { if (cluster->count > background->count) background=cluster; cluster=cluster->next; } } if (background != (Cluster *) NULL) { threshold=(background->red.center+object->red.center)/2.0; pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->green.center+object->green.center)/2.0; pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->blue.center+object->blue.center)/2.0; pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); } /* Relinquish resources. 
*/ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeHistogram() computes the histogram for an image. % % The format of the InitializeHistogram method is: % % InitializeHistogram(const Image *image,ssize_t **histogram) % % A description of each parameter follows. % % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % */ static void InitializeHistogram(const Image *image,ssize_t **histogram, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Initialize histogram. 
*/ for (i=0; i <= 255; i++) { histogram[Red][i]=0; histogram[Green][i]=0; histogram[Blue][i]=0; } for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++; histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++; histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++; p++; } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e I n t e r v a l T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeIntervalTree() initializes an interval tree from the lists of % zero crossings. % % The format of the InitializeIntervalTree method is: % % InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes, % IntervalTree *node) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. 
%
*/

/*
  InitializeList() appends every childless (leaf) node reachable from
  `node` -- via both sibling and child chains -- to `list`, advancing
  *number_nodes as it goes.
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

/*
  MeanStability() sets each node's mean_stability to the average
  stability of the node's immediate children (left at 0.0 for leaves),
  recursing over the sibling and child chains.
*/
static void MeanStability(IntervalTree *node)
{
  register IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      register ssize_t
        count;

      register MagickRealType
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(MagickRealType) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

/*
  Stability() sets each node's stability to the tau gap between the node
  and its first child (0.0 for leaves), recursing over siblings and
  children.
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /*
    i starts at -1 so that zero_crossing[i+1] walks the array from its
    first entry (the coarsest scale) to the last.
  */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list:  for each current leaf, cut its interval at every zero
      crossing of the next-finer scale, chaining the pieces as children
      (first piece) and siblings (subsequent pieces).
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                /* On allocation failure, tear down whatever was built. */
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /*
        If any crossing was found, add the final piece covering the rest
        of the parent interval.
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p t i m a l   T a u                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OptimalTau() finds the optimal tau for each band of the histogram.
%
%  The format of the OptimalTau method is:
%
%    MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
%      const double min_tau,const double delta_tau,
%      const double smooth_threshold,short *extrema)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
*/

/*
  ActiveNodes() collects nodes whose stability is at least the mean
  stability of their children; once a node is accepted its subtree is
  not descended (only its siblings are scanned).
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability >= node->mean_stability)
    {
      list[(*number_nodes)++]=node;
      ActiveNodes(list,number_nodes,node->sibling);
    }
  else
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
    }
}

/*
  FreeNodes() releases a node together with its sibling and child
  subtrees.
*/
static void FreeNodes(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  node=(IntervalTree *) RelinquishMagickMemory(node);
}

static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  MagickRealType
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list:  one entry per tau step plus one for the
    unsmoothed histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);
  /*
    Initialize zero crossing list:  smooth the histogram at each scale tau
    (coarse to fine) and record where its second derivative crosses zero.
  */
  derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(MagickRealType *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(MagickRealType) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(MagickRealType *) RelinquishMagickMemory(derivative);
  second_derivative=(MagickRealType *)
    RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval:  mirror the first and
    last observed crossing onto bins 0 and 255.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes:  stability is greater (or equal) to the mean stability
    of its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list.
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak:  a -1 crossing at the right edge marks a
      maximum, otherwise search for the minimum.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /*
      Mark the interval with the (signed) extrema index; index 0 is
      remapped to 256 so that sign alone distinguishes peak from valley.
    */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  average_tau/=(MagickRealType) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S c a l e S p a c e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleSpace() performs a scale-space filter on the 1D histogram.
%
%  The format of the ScaleSpace method is:
%
%      ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
%        MagickRealType *scale_histogram)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  double
    exponent,
    *kernel,
    normalization,
    weighted_sum;

  register ssize_t
    i,
    j;

  /*
    Convolve the histogram with a Gaussian of standard deviation tau:
    build a one-sided kernel table, then form the normalized weighted sum
    for each of the 256 bins.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  normalization=1.0/(tau*sqrt(2.0*MagickPI));
  exponent=(-1.0/(2.0*tau*tau));
  (void) memset(kernel,0,256*sizeof(*kernel));
  for (i=0; i <= 255; i++)
  {
    kernel[i]=exp((double) exponent*i*i);
    if (kernel[i] < MagickEpsilon)
      break;  /* remaining entries stay zero: the tail is negligible */
  }
  for (i=0; i <= 255; i++)
  {
    weighted_sum=0.0;
    for (j=0; j <= 255; j++)
      weighted_sum+=(double) histogram[j]*kernel[MagickAbsoluteValue(i-j)];
    scale_histogram[i]=(MagickRealType) (normalization*weighted_sum);
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e g m e n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose:  Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold:  This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the buffers already acquired before throwing. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        /* Macro returns MagickFalse; no trailing semicolon by convention. */
        ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.  The image is transformed to the requested
    colorspace for classification and restored afterwards.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace);
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  (void) TransformImageColorspace(image,previous_colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Z e r o C r o s s H i s t o g r a m                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(MagickRealType *second_derivative,
%        const MagickRealType smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of MagickRealTypes representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings:  parity remembers the sign of the last nonzero
    sample so a crossing is recorded at each sign change.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
density_prior_box_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <vector> #include "paddle/fluid/operators/detection/prior_box_op.h" namespace paddle { namespace operators { template <typename T> class DensityPriorBoxOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<paddle::framework::Tensor>("Input"); auto* image = ctx.Input<paddle::framework::Tensor>("Image"); auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes"); auto* vars = ctx.Output<paddle::framework::Tensor>("Variances"); auto variances = ctx.Attr<std::vector<float>>("variances"); auto clip = ctx.Attr<bool>("clip"); auto fixed_sizes = ctx.Attr<std::vector<float>>("fixed_sizes"); auto fixed_ratios = ctx.Attr<std::vector<float>>("fixed_ratios"); auto densities = ctx.Attr<std::vector<int>>("densities"); T step_w = static_cast<T>(ctx.Attr<float>("step_w")); T step_h = static_cast<T>(ctx.Attr<float>("step_h")); T offset = static_cast<T>(ctx.Attr<float>("offset")); auto img_width = image->dims()[3]; auto img_height = image->dims()[2]; auto feature_width = input->dims()[3]; auto feature_height = input->dims()[2]; T step_width, step_height; if (step_w == 0 || step_h == 0) { step_width = static_cast<T>(img_width) / feature_width; step_height = static_cast<T>(img_height) / feature_height; } else { step_width = step_w; step_height = step_h; } int num_priors = 0; 
#ifdef PADDLE_WITH_MKLML #pragma omp parallel for reduction(+ : num_priors) #endif for (size_t i = 0; i < densities.size(); ++i) { num_priors += (fixed_ratios.size()) * (pow(densities[i], 2)); } boxes->mutable_data<T>(ctx.GetPlace()); vars->mutable_data<T>(ctx.GetPlace()); auto box_dim = vars->dims(); boxes->Resize({feature_height, feature_width, num_priors, 4}); auto e_boxes = framework::EigenTensor<T, 4>::From(*boxes).setConstant(0.0); int step_average = static_cast<int>((step_width + step_height) * 0.5); std::vector<float> sqrt_fixed_ratios; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (size_t i = 0; i < fixed_ratios.size(); i++) { sqrt_fixed_ratios.push_back(sqrt(fixed_ratios[i])); } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int h = 0; h < feature_height; ++h) { for (int w = 0; w < feature_width; ++w) { T center_x = (w + offset) * step_width; T center_y = (h + offset) * step_height; int idx = 0; // Generate density prior boxes with fixed sizes. for (size_t s = 0; s < fixed_sizes.size(); ++s) { auto fixed_size = fixed_sizes[s]; int density = densities[s]; int shift = step_average / density; // Generate density prior boxes with fixed ratios. for (size_t r = 0; r < fixed_ratios.size(); ++r) { float box_width_ratio = fixed_size * sqrt_fixed_ratios[r]; float box_height_ratio = fixed_size / sqrt_fixed_ratios[r]; float density_center_x = center_x - step_average / 2. + shift / 2.; float density_center_y = center_y - step_average / 2. + shift / 2.; for (int di = 0; di < density; ++di) { for (int dj = 0; dj < density; ++dj) { float center_x_temp = density_center_x + dj * shift; float center_y_temp = density_center_y + di * shift; e_boxes(h, w, idx, 0) = std::max( (center_x_temp - box_width_ratio / 2.) / img_width, 0.); e_boxes(h, w, idx, 1) = std::max( (center_y_temp - box_height_ratio / 2.) / img_height, 0.); e_boxes(h, w, idx, 2) = std::min( (center_x_temp + box_width_ratio / 2.) 
/ img_width, 1.); e_boxes(h, w, idx, 3) = std::min( (center_y_temp + box_height_ratio / 2.) / img_height, 1.); idx++; } } } } } } if (clip) { T* dt = boxes->data<T>(); std::transform(dt, dt + boxes->numel(), dt, [](T v) -> T { return std::min<T>(std::max<T>(v, 0.), 1.); }); } framework::Tensor var_t; var_t.mutable_data<T>( phi::make_ddim({1, static_cast<int>(variances.size())}), ctx.GetPlace()); auto var_et = framework::EigenTensor<T, 2>::From(var_t); for (size_t i = 0; i < variances.size(); ++i) { var_et(0, i) = variances[i]; } int box_num = feature_height * feature_width * num_priors; auto var_dim = vars->dims(); vars->Resize({box_num, static_cast<int>(variances.size())}); auto e_vars = framework::EigenMatrix<T, Eigen::RowMajor>::From(*vars); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int i = 0; i < box_num; ++i) { for (size_t j = 0; j < variances.size(); ++j) { e_vars(i, j) = variances[j]; } } vars->Resize(var_dim); boxes->Resize(box_dim); } }; // namespace operators } // namespace operators } // namespace paddle
simplest.c
/*
  Minimal OpenMP snippet (apparently a compiler/tooling test fixture):
  exercises "atomic write", "atomic read", and "barrier" constructs.
*/
int main() {
  int x;  /* shared across the team */
#pragma omp parallel
  {
    int p;  /* private; only assigned in the dead branch below */
    if (1) {
      /* Taken branch: every thread atomically stores 0 into x. */
#pragma omp atomic write
      x = 0;
    } else {
      /* Dead branch (condition is the constant 1); presumably kept so the
         compiler still has to process an "atomic read" -- confirm intent. */
#pragma omp atomic read
      p = x;
    }
#pragma omp barrier
    /* Expression statement with no effect: reads x after the barrier. */
    x;
  }
}
GB_unop__identity_uint8_uint32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint8_uint32)
// op(A') function:  GB (_unop_tran__identity_uint8_uint32)

// C type:   uint8_t
// A type:   uint32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: per C's unsigned conversion rules this truncates the uint32_t
// value modulo 256.
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint8_uint32)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint8_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is supplied by the shared transpose template, which
    // expands in terms of the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
main.c
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <omp.h> #define TRUE 1 #define FALSE 0 #define ATOM_H 0 #define ATOM_C 1 #define ATOM_O 2 #define ATOM_N 3 #define ATOM_UNKNOWN 4 #define ATOM_TYPES 5 #define MOD_MM3 1 #define MOD_TIP3P 2 #define MOD_GAFF 3 #define INITIAL_BONDS 5 // Number of bonds per atom possible. // If you've got some weird compound may need to increase /* 1 kcal/mol = 4184 J/mol * = 4184 kg m^2 s^-2 mol^-1 * = 4184 / (1.66*10^-27) amu m^2 s^-2 mol^-1 * = (4184 * (10^10)^2) / (1.66*10^-27) amu A^2 s^-2 mol^-1 * = (4184 * (10^10)^2 * (10^12)^-2) / (1.66*10^-27) amu A^2 ps^-2 mol^-1 * = (4184 * (10^10)^2 * (10^12)^-2) / (1.66*10^-27 * 6.02*10^23) amu A^2 ps^-2 * = 418.68 amu A^2 ps^-2 */ #define KCAL_ATOMIC 418.68 #define BOLTZMANN_K 0.001985875 * KCAL_ATOMIC // kb = 0.001985875 kcal mol^-1 K^-1, * 418.68 conversion. // CC = 9.987551 * 10^9 N m^2 C^-2 // 1 J = 1 N m // eg units are J m C^-2 // so CC / 4184 kcal m / C^2 mol // CC * 10^-10 / 4184 kcal Ang / C^2 mol // = #define COULOMB_CONSTANT 2.3871e-04 #define LANGEVIN 0 #define ANDERSEN 1 #define SP1 0 #define SP2 1 #define SP3 2 struct Vector { double x, y, z; }; struct Model { double eq_bond_length[ATOM_TYPES][ATOM_TYPES]; double eq_bond_K[ATOM_TYPES][ATOM_TYPES]; double angle_C[ATOM_TYPES]; double angle_Z[ATOM_TYPES]; double charge[ATOM_TYPES]; double vdwM[ATOM_TYPES]; double polarizability[ATOM_TYPES]; double dihedral_pref[ATOM_TYPES][ATOM_TYPES]; double dihedral_n[ATOM_TYPES][ATOM_TYPES]; double dihedral_gamma[ATOM_TYPES][ATOM_TYPES]; double vdwR[ATOM_TYPES]; double vdw_EDEP[ATOM_TYPES]; }; struct Atom { struct Vector v; struct Vector vel; unsigned int n_bonds; int check; unsigned int lim_bonds; struct Atom ** bonds; char name[20]; int type; int hybridization; unsigned int i; int andersen_f; }; /* Molecule container - n_atoms in as struct. 
*/
/* Molecular system: the atom array, its length, the force-field model in use
 * and the selected thermostat (LANGEVIN/ANDERSEN, or -1 if unset). */
struct Molecule {
    struct Atom *as;
    unsigned int n_atoms;
    struct Model *model;
    int thermostat;
};

/**
 * Generates uniform random long double in (0, 1]: the +1 offsets mean the
 * result is never exactly 0 (safe to pass to log()) and can reach 1.
 * @return long double
 *   Long double containing uniform random number.
 */
long double uniform_rand(void) {
    return ((long double) rand() + 1.) / ((long double) RAND_MAX + 1.);
}

/**
 * Uses Box-Muller method to generate a normally distributed random number.
 * @param mean
 *   Mean of normal distribution to select random variable from
 * @param std
 *   Standard deviation of normal distribution from which random variable selected
 * @return double
 *   Returns normally distributed random number
 */
double norm_rand(double mean, double std) {
    // Box-Muller method
    double rnd1, rnd2;
    rnd1 = (double) uniform_rand();
    rnd2 = (double) uniform_rand();
    double unadj = sqrt(-2 * log(rnd1)) * cos(2 * M_PI * rnd2);
    //printf("%f, %f, %f\n", rnd1, rnd2, mean + std*unadj);
    return mean + std * unadj;
}

/* setup_model()
 * Zeroes every parameter table in *m, then, for MOD_GAFF, fills in bond,
 * angle, dihedral, van der Waals, mass and polarizability parameters.
 * MOD_MM3 and MOD_TIP3P currently leave all tables zeroed.
 * arg *m: Model struct to populate.
 * arg model: MOD_* constant selecting the parameter set.
 */
void setup_model(struct Model * m, int model) {
    int i, k;
    // Zero everything first so unparameterised atom pairs read as 0.
    for (i = 0; i < ATOM_TYPES; i++) {
        for (k = 0; k < ATOM_TYPES; k++) {
            m->eq_bond_length[i][k] = 0;
            m->eq_bond_K[i][k] = 0;
            m->dihedral_pref[i][k] = 0;
            m->dihedral_n[i][k] = 0;
            m->dihedral_gamma[i][k] = 0;
        }
        m->charge[i] = 0;
        m->angle_C[i] = 0;
        m->angle_Z[i] = 0;
        m->vdwM[i] = 0;
        m->polarizability[i] = 0;
        m->vdwR[i] = 0;
        m->vdw_EDEP[i] = 0;
    }
    if (model == MOD_GAFF) {
        // Equilibrium bond lengths (A) and force constants, stored
        // symmetrically under both [a][b] and [b][a].
        m->eq_bond_length[ATOM_H][ATOM_H] = 0.738;
        m->eq_bond_K[ATOM_H][ATOM_H] = 4.661;
        m->eq_bond_length[ATOM_H][ATOM_C] = 1.090;
        m->eq_bond_K[ATOM_H][ATOM_C] = 6.217;
        m->eq_bond_length[ATOM_C][ATOM_H] = 1.090;
        m->eq_bond_K[ATOM_C][ATOM_H] = 6.217;
        m->eq_bond_length[ATOM_O][ATOM_H] = 0.960;
        m->eq_bond_K[ATOM_O][ATOM_H] = 5.794;
        m->eq_bond_length[ATOM_H][ATOM_O] = 0.960;
        m->eq_bond_K[ATOM_H][ATOM_O] = 5.794;
        m->eq_bond_length[ATOM_N][ATOM_H] = 1.010;
        m->eq_bond_K[ATOM_N][ATOM_H] = 6.057;
        m->eq_bond_length[ATOM_H][ATOM_N] = 1.010;
        m->eq_bond_K[ATOM_H][ATOM_N] = 6.057;
        m->eq_bond_length[ATOM_C][ATOM_O] = 1.440;
        m->eq_bond_K[ATOM_C][ATOM_O] = 7.347;
        m->eq_bond_length[ATOM_O][ATOM_C] = 1.440;
        m->eq_bond_K[ATOM_O][ATOM_C] = 7.347;
        m->eq_bond_length[ATOM_C][ATOM_N] = 1.470;
        m->eq_bond_K[ATOM_C][ATOM_N] = 7.504;
        m->eq_bond_length[ATOM_N][ATOM_C] = 1.470;
        m->eq_bond_K[ATOM_N][ATOM_C] = 7.504;
        m->eq_bond_length[ATOM_C][ATOM_C] = 1.526;
        m->eq_bond_K[ATOM_C][ATOM_C] = 7.643;
        // Angle-bend C (central atom) and Z (outer atom) parameters.
        m->angle_C[ATOM_C] = 1.339;
        m->angle_C[ATOM_N] = 1.300;
        m->angle_C[ATOM_O] = 1.249;
        m->angle_Z[ATOM_C] = 1.183;
        m->angle_Z[ATOM_N] = 1.212;
        m->angle_Z[ATOM_O] = 1.219;
        m->angle_Z[ATOM_H] = 0.784;
        // Atomic masses (amu).
        m->vdwM[ATOM_C] = 12.;
        m->vdwM[ATOM_H] = 1.008;
        m->vdwM[ATOM_O] = 16.;
        m->vdwM[ATOM_N] = 14.;
        m->polarizability[ATOM_C] = 0.360;
        m->polarizability[ATOM_N] = 0.530;
        m->polarizability[ATOM_H] = 0.135;
        m->polarizability[ATOM_O] = 0.465;
        // Torsion terms: prefactor, phase gamma (rad), periodicity n.
        m->dihedral_pref[ATOM_C][ATOM_C] = 1.2/4.;
        m->dihedral_pref[ATOM_C][ATOM_N] = 10./4.;
        m->dihedral_pref[ATOM_N][ATOM_C] = 10./4.;
        m->dihedral_pref[ATOM_C][ATOM_O] = 4.6/4.;
        m->dihedral_pref[ATOM_O][ATOM_C] = 4.6/4.;
        m->dihedral_gamma[ATOM_C][ATOM_C] = 180. * (M_PI / 180.);
        m->dihedral_gamma[ATOM_C][ATOM_N] = 180. * (M_PI / 180.);
        m->dihedral_gamma[ATOM_N][ATOM_C] = 180. * (M_PI / 180.);
        m->dihedral_gamma[ATOM_C][ATOM_O] = 180. * (M_PI / 180.);
        m->dihedral_gamma[ATOM_O][ATOM_C] = 180. * (M_PI / 180.);
        m->dihedral_n[ATOM_C][ATOM_C] = 2.;
        m->dihedral_n[ATOM_C][ATOM_N] = 2.;
        m->dihedral_n[ATOM_N][ATOM_C] = 2.;
        m->dihedral_n[ATOM_C][ATOM_O] = 2.;
        m->dihedral_n[ATOM_O][ATOM_C] = 2.;
        // van der Waals radii (A) and well depths.
        m->vdwR[ATOM_H] = 1.3870;
        m->vdwR[ATOM_C] = 1.9080;
        m->vdwR[ATOM_O] = 1.6612;
        m->vdwR[ATOM_N] = 1.8240;
        m->vdw_EDEP[ATOM_H] = 0.0157;
        m->vdw_EDEP[ATOM_C] = 0.0860;
        m->vdw_EDEP[ATOM_N] = 0.1700;
        m->vdw_EDEP[ATOM_O] = 0.2100;
    }
    // NOTE(review): charge[] is never populated, so all electrostatic terms
    // evaluate to zero with this model — confirm whether that is intentional.
    return;
}

/* reset_check()
 * Resets all .check values to be FALSE. Should be run before _any_
 * graph or rotate commands
 * Error modes;
 * - If not all atoms are allocated, will segfault.
*/ void reset_check(struct Molecule *m) { unsigned int i; for (i = 0; i < m->n_atoms; i++) { m->as[i].check = FALSE; } return; } /* add_atom() * Initialises atom *a. * arg *a: Pointer to atom struct to put data in. * arg x, y, z: positional coordinated. * arg name: atom name * returns: 1 in all cases. * Error modes; * - If atom at *a not allocated memory, will segfault. */ int add_atom(struct Atom *a, float x, float y, float z, char name[20]) { a->v.x = x; a->v.y = y; a->v.z = z; a->vel.x = 0; a->vel.y = 0; a->vel.z = 0; a->n_bonds = 0; a->lim_bonds = INITIAL_BONDS; int c_buf = 0; while (name[c_buf] == ' ') c_buf++; strcpy(a->name, name+c_buf); a->check = FALSE; if (a->name[0] == 'H') { a->type = ATOM_H; } else if (a->name[0] == 'C'){ a->type = ATOM_C; } else if (a->name[0] == 'O') { a->type = ATOM_O; } else { printf("Atom %s not added.\n", name); a->type = ATOM_UNKNOWN; } a->bonds = (struct Atom **) malloc(sizeof(struct Atom *) * INITIAL_BONDS); return 1; } /* add_bond() * Creates bond between atom *a and *b. * arg *a: Pointer to atom. * arg *b: Pointer to atom. * returns: 1 in all cases * Error modes; * - Will seg fault if *a or *b do not exist, or if there is insufficient memory. */ int add_bond(struct Atom *a, struct Atom *b) { a->bonds[a->n_bonds] = b; b->bonds[b->n_bonds] = a; a->n_bonds++; b->n_bonds++; if (a->n_bonds >= a->lim_bonds) { a->lim_bonds *= 2; a->bonds = (struct Atom **) realloc(a->bonds, sizeof(struct Atom *) * a->lim_bonds); } if (b->n_bonds >= b->lim_bonds) { b->lim_bonds *= 2; b->bonds = (struct Atom **) realloc(b->bonds, sizeof(struct Atom *) * b->lim_bonds); } return 1; } void set_vector(struct Vector *a, double x, double y, double z) { a->x = x; a->y = y; a->z = z; return; } /* free_atoms() * Frees bond arrays for each atom within molecule *m * * Error modes; * - Will double free if any atom has not been initialized. 
*/
void free_atoms(struct Molecule *m) {
    unsigned int i;
    for (i = 0; i < m->n_atoms; i++) {
        free(m->as[i].bonds);
    }
    return;
}

/* crash()
 * Emergency exit: releases all per-atom bond arrays and the atom array,
 * then terminates the process with a failure status. */
void crash(struct Molecule *m) {
    free_atoms(m);
    free(m->as);
    exit(-1);
}

/* print_moleculef()
 * Prints molecular graph starting at atom n.
 * Iterates in a functional manner, setting check to TRUE once an
 * atom has been visited. Visits each atom once.
 * arg *a: Pointer to initial atom.
 * arg n: Atom to begin graph at
 */
void print_moleculef(struct Atom * a, unsigned int n) {
    // if we have been visited, terminate execution.
    if (a->check == TRUE)
        return;
    a->check = TRUE;
    unsigned int i,j;
    for (j = 0; j < n+1; j++)
        printf("> ");
    printf("Atom %s [%f, %f, %f] %d\n", a->name, a->v.x, a->v.y, a->v.z, a->n_bonds);
    // Recursively loop over, calling self for each neighbour.
    for (i = 0; i < a->n_bonds; i++) {
        print_moleculef(a->bonds[i], n+1);
    }
    return;
}

/* print_molecule()
 * Prints molecular system, starting with one atom and looping to others
 */
void print_molecule(struct Atom * a) {
    if (a->check == TRUE)
        return;
    a->check = TRUE;
    unsigned int i;
    printf("%f, %f, %f\n", a->v.x, a->v.y, a->v.z);
    for (i = 0; i < a->n_bonds; i++) {
        print_molecule(a->bonds[i]);
    }
    return;
}

/* sub_vector()
 * Subtracts vector *b from *a, and outputs into *res
 * arg *a: Vector
 * arg *b: Vector
 * arg *res: Vector, res = a - b
 */
void sub_vector(struct Vector *a, struct Vector *b, struct Vector *res) {
    // res = a - b
    res->x = a->x - b->x;
    res->y = a->y - b->y;
    res->z = a->z - b->z;
    return;
}

/* add_vector()
 * Adds vector *b to *a, and outputs into *res
 * arg *a: Vector
 * arg *b: Vector
 * arg *res: Vector, res = a + b
 */
void add_vector(struct Vector *a, struct Vector *b, struct Vector *res) {
    // res = a + b
    res->x = a->x + b->x;
    res->y = a->y + b->y;
    res->z = a->z + b->z;
    return;
}

/* magnitude()
 * Calculated magnitude of vector *v.
 * arg *v: Vector
 * returns: Magnitude
 * NOTE(review): computes in single precision (powf/sqrtf) although the
 * components are doubles — precision loss may matter; confirm acceptable.
 */
float magnitude(struct Vector *v) {
    float n;
    n = powf(v->x, 2.) + powf(v->y, 2.) + powf(v->z, 2.);
    n = sqrtf(n);
    return n;
}

/* normalise()
 * Normalise vector *v in situ
 * arg *v: Vector to be normalised.
 *
 * Error modes
 * - If vector has 0 magnitude, will do nothing.
 */
void normalise(struct Vector *v) {
    float n = magnitude(v);
    if (n == 0) {
        printf("Division by 0\n");
        return;
    }
    v->x /= n;
    v->y /= n;
    v->z /= n;
    return;
}

/* dot()
 * Scalar (dot) product of *a and *b. */
double dot(struct Vector *a, struct Vector *b) {
    return (a->x * b->x) + (a->y * b->y) + (a->z * b->z);
}

/* print_vector()
 * Prints *v as "(x, y, z)" to stdout. */
void print_vector(struct Vector *v) {
    printf("(%f, %f, %f)\n", v->x, v->y, v->z);
}

/* calc_phi()
 * Returns the A-B-C bond angle (radians) at central point *b. */
double calc_phi(struct Vector *a, struct Vector *b, struct Vector *c) {
    // calculates the ABC angle.
    struct Vector ab;
    sub_vector(a, b, &ab);
    struct Vector bc;
    sub_vector(c, b, &bc);
    normalise(&ab);
    normalise(&bc);
    double v = dot(&ab, &bc);
    // because v is a double it can be -1.0000001
    if (v > 1)
        v = 1;
    if (v < -1)
        v = -1;
    return acos(v);
}

/* calc_omega()
 * Returns the angle (radians) between vectors B->A and C->D; used as the
 * torsion coordinate for the A-B-C-D dihedral term. */
double calc_omega(struct Vector *a, struct Vector *b, struct Vector *c, struct Vector *d) {
    struct Vector ab, cd;
    sub_vector(a, b, &ab);
    sub_vector(d, c, &cd);
    normalise(&ab);
    normalise(&cd);
    double v = dot(&ab, &cd);
    // clamp rounding error before acos()
    if (v > 1)
        v = 1;
    if (v < -1)
        v = -1;
    return acos(v);
}

/* calc_distance()
 * Euclidean distance between points *a and *b (angstroms). */
double calc_distance(struct Vector *a, struct Vector *b) {
    double su = 0;
    su += powl(a->x - b->x, 2.);
    su += powl(a->y - b->y, 2.);
    su += powl(a->z - b->z, 2.);
    return sqrtl(su);
}

/* bond_energy()
 * Harmonic bond-stretch energy K*(r - req)^2 for the a-b bond, evaluated at
 * the (possibly offset) positions *apos/*bpos. */
double bond_energy(struct Atom *a, struct Atom *b, struct Vector *apos, struct Vector *bpos, struct Model *m) {
    double req = m->eq_bond_length[a->type][b->type];
    double r = calc_distance(apos, bpos);
    double K = m->eq_bond_K[a->type][b->type];
    return K * powl(r - req, 2.);
}

/* angle_energy()
 * Harmonic angle-bend energy for the a-b-c angle (b central). The reference
 * angle phi0 comes from b's hybridization; the force constant Kijk is built
 * from the model's Z/C parameters and equilibrium bond lengths. */
double angle_energy(struct Atom *a, struct Atom *b, struct Atom *c, struct Vector *apos, struct Vector *bpos, struct Vector *cpos, struct Model *m) {
    // a-b-c
    double phi0;
    switch (b->hybridization) {
        case SP1: phi0 = 180. * (M_PI / 180.); break;
        case SP2: phi0 = 120. * (M_PI / 180.); break;
        case SP3: phi0 = 104. * (M_PI / 180.); break;
        default: phi0 = 104. * (M_PI / 180.); break;
    }
    double phi = calc_phi(apos, bpos, cpos);
    double rijeq = m->eq_bond_length[a->type][b->type];
    double rjkeq = m->eq_bond_length[b->type][c->type];
    double D = powl(rijeq - rjkeq, 2.) * powl(rijeq + rjkeq, -2.);
    double Kijk = 143.9 * m->angle_Z[a->type] * m->angle_C[b->type] * m->angle_Z[c->type];
    Kijk *= powl(rijeq + rjkeq, -1.);
    Kijk *= powl(phi0, -2.);
    Kijk *= expl(-2 * D);
    return Kijk * powl(phi - phi0, 2.);
}

/* vdw_energy()
 * 12-6 Lennard-Jones energy EDEP*(t^12 - 2 t^6) with t = sigma/r; EDEP and
 * sigma are geometric means of the per-type parameters. */
double vdw_energy(struct Atom *a, struct Atom *b, struct Vector *apos, struct Vector *bpos, struct Model *m) {
    double EDEP = sqrtl(m->vdw_EDEP[a->type] * m->vdw_EDEP[b->type]);
    double sigma = sqrtl(m->vdwR[a->type] * m->vdwR[b->type]);
    double distance = calc_distance(apos, bpos);
    double term = sigma / distance;
    double term6 = term * term * term * term * term * term;
    double term12 = term6 * term6;
    return EDEP * (term12 - 2 * term6);
}

/* dihedral_energy()
 * Cosine torsion term pref*(1 + cos(n*omega - gamma)) for the a-b-c-d chain;
 * only the central b-c pair selects the parameters (a and d are unused). */
double dihedral_energy(struct Atom *a, struct Atom *b, struct Atom *c, struct Atom *d, struct Vector *apos, struct Vector *bpos, struct Vector *cpos, struct Vector *dpos, struct Model *m) {
    (void)a;
    (void)d;
    double omega = calc_omega(apos, bpos, cpos, dpos);
    // energy = pref * (1 + cos(n omega - gamma))
    double energy = m->dihedral_n[b->type][c->type] * omega - m->dihedral_gamma[b->type][c->type];
    energy = cos(energy);
    energy += 1;
    energy *= m->dihedral_pref[b->type][c->type];
    return energy;
}

/* calc_energy()
 * Total potential energy (kcal/mol) of molecule *m, with atom atom_offset
 * displaced by *offset (used for finite-difference gradients: pass a zero
 * offset for the plain energy). Sums bond, angle, dihedral, van der Waals
 * and electrostatic contributions. */
double calc_energy(struct Molecule *m, unsigned int atom_offset, struct Vector * offset) {
    unsigned int i,j,k,l;
    struct Atom *a,*b,*c,*d;
    struct Vector zero;
    zero.x = 0;
    zero.y = 0;
    zero.z = 0;
    struct Vector apos, bpos, cpos, dpos;
    double energy = 0;
    double r;
    for (i = 0; i < m->n_atoms; i++) {
        a = &(m->as[i]);
        // apos is a's position, displaced if a is the offset atom
        add_vector(&(a->v), (i == atom_offset)?offset:&(zero), &apos);
        for (j = 0; j < a->n_bonds; j++) {
            // Ebond
            b = a->bonds[j];
            add_vector(&(b->v), (b->i == atom_offset)?offset:&(zero), &bpos);
            energy += bond_energy(a, b, &apos, &bpos, m->model) / 2.; // divided by 2 as will be A-B and B-A
            for (k = j+1; k < a->n_bonds; k++) {
                // Eangle
                c = a->bonds[k];
                add_vector(&(c->v), (c->i == atom_offset)?offset:&(zero), &cpos);
                energy += angle_energy(b, a, c, &bpos, &apos, &cpos, m->model); // not divided as we only do this path once.
                for (l = 0; l < c->n_bonds; l++) {
                    // Edihedral
                    d = c->bonds[l];
                    add_vector(&(d->v), (d->i == atom_offset)?offset:&(zero), &dpos);
                    energy += dihedral_energy(d, c, a, b, &dpos, &cpos, &apos, &bpos, m->model)/2.; // ditto
                }
            }
        }
        // Non-bonded pairs: j starts at i+1, so each pair is counted once.
        for (j = i+1; j < m->n_atoms; j++) {
            if (i == j) // cannot occur since j starts at i+1; kept as a guard
                continue;
            b = &(m->as[j]);
            add_vector(&(b->v), (b->i == atom_offset)?offset:&(zero), &bpos);
            //Evdw
            energy += vdw_energy(a, b, &apos, &bpos, m->model);
            // Eelectrostatic
            // Ees = ke q Q / r
            double q, Q;
            r = calc_distance(&apos, &bpos);
            q = m->model->charge[a->type];
            Q = m->model->charge[b->type];
            energy += COULOMB_CONSTANT * q * Q / r;
        }
    }
    return energy;
}

/* kcal_to_atomic() / atomic_to_kcal()
 * Convert between kcal/mol and internal units (amu A^2 ps^-2). */
double kcal_to_atomic(double v) {
    return v * KCAL_ATOMIC;
}

double atomic_to_kcal(double v) {
    return v / KCAL_ATOMIC;
}

/* set_temp_vel()
 * Gives atom atomid a velocity of magnitude sqrt(2 kB T / m) in a random
 * direction (used by heating and the Andersen thermostat). */
void set_temp_vel(struct Molecule *m, int atomid, double temp) {
    struct Vector v;
    // random direction; components in roughly [-0.49, 0.50]
    v.x = ((rand()%100 - 49)/100.);
    v.y = ((rand()%100 - 49)/100.);
    v.z = ((rand()%100 - 49)/100.);
    if (v.x == 0 && v.y == 0 && v.z == 0)
        v.x = 1; // avoid normalising a zero vector
    normalise(&v);
    double mass = m->model->vdwM[m->as[atomid].type];
    double temp_factor = sqrtl(2 * BOLTZMANN_K * temp / mass);
    //if (atomid == 1)
    //printf("%lf, %lf, %lf :: %lf\n", v.x, v.y, v.z, temp_factor);
    m->as[atomid].vel.x = v.x * temp_factor;
    m->as[atomid].vel.y = v.y * temp_factor;
    m->as[atomid].vel.z = v.z * temp_factor;
}

/* calc_temperature()
 * Returns the mean per-atom kinetic temperature m v^2 / (2 kB) in kelvin.
 * NOTE(review): equipartition in 3D would use m v^2 / (3 kB); confirm the
 * intended convention. */
double calc_temperature(struct Molecule *m) {
    double v;
    //T = m v^2 / (2k)
    unsigned int i;
    double sum = 0, mass;
    for (i = 0; i < m->n_atoms; i++) {
        v = magnitude(&(m->as[i].vel));
        mass = m->model->vdwM[m->as[i].type];
        sum += (mass * v * v) / (2 * BOLTZMANN_K);
    }
    //printf("Temp: %f K\n", sum/m->n_atoms);
    return sum / m->n_atoms;
}

/* process_atom()
 * One integration step for atom id: computes the potential-energy gradient
 * by central finite differences (step dn along each axis), converts it to an
 * acceleration, applies the selected thermostat, and updates the velocity.
 * arg id: atom index
 * arg dn: finite-difference step (angstroms)
 * arg dt: time step (ps)
 * arg viscosity: friction (Langevin) or collision rate (Andersen), ps^-1
 * arg T: target temperature (K)
 * NOTE(review): calls rand() via norm_rand()/set_temp_vel(); rand() is not
 * thread-safe, yet this function is invoked from an OpenMP parallel loop. */
void process_atom(int id, struct Molecule *m, double dn, double dt, double viscosity, double T) {
    double Tr = T; //calc_temperature(m);
    struct Vector o1, o2;
    struct Vector e_grad;
    double e1, e2;
    // dE/dx by central difference
    set_vector(&o1, dn, 0, 0);
    set_vector(&o2, -dn, 0, 0);
    e1 = kcal_to_atomic(calc_energy(m, id, &o1));
    e2 = kcal_to_atomic(calc_energy(m, id, &o2));
    e_grad.x = (e1 - e2) / (2 * dn);
    //printf("e1: %f, e2: %f, dn: %f, grad: %f\n", e1, e2, dn, e_grad.x);
    // dE/dy
    set_vector(&o1, 0, dn, 0);
    set_vector(&o2, 0, -dn, 0);
    e1 = kcal_to_atomic(calc_energy(m, id, &o1));
    e2 = kcal_to_atomic(calc_energy(m, id, &o2));
    e_grad.y = (e1 - e2) / (2 * dn);
    // dE/dz
    set_vector(&o1, 0, 0, dn);
    set_vector(&o2, 0, 0, -dn);
    e1 = kcal_to_atomic(calc_energy(m, id, &o1));
    e2 = kcal_to_atomic(calc_energy(m, id, &o2));
    e_grad.z = (e1 - e2) / (2 * dn);
    double mass = m->model->vdwM[m->as[id].type];
    struct Vector accel;
    // potential term
    double t3;
    //t1 = -e_grad.x;
    accel.x = -e_grad.x;
    accel.y = -e_grad.y;
    accel.z = -e_grad.z;
    // energy units are amu A^2 ps^-2, so units of gradient are amu A ps^-2.
    if (m->thermostat == LANGEVIN) {
        // friction term
        //t2 = -viscosity * mass * m->as[id].vel.x;
        accel.x -= viscosity * mass * m->as[id].vel.x;
        accel.y -= viscosity * mass * m->as[id].vel.y;
        accel.z -= viscosity * mass * m->as[id].vel.z;
        // not sure about viscosity units. reported to be ps^-1, but there's no mass term here.
        // so amu(?) ps^-1 A ps^-1 = amu A ps^-2
        double variance = 2 * mass * viscosity * BOLTZMANN_K * Tr / dt;
        // units are amu * ps^-1 * amu A^2 ps^-2 / K * K / ps
        // = amu^2 * ps^-4 * A^2
        double std = sqrtl(variance);
        // so the units of the std are amu A ps^-2
        t3 = norm_rand(0, std);
        accel.x += t3;
        accel.y += norm_rand(0, std);
        accel.z += norm_rand(0, std);
    }
    m->as[id].vel.x += dt * accel.x / mass;
    m->as[id].vel.y += dt * accel.y / mass;
    m->as[id].vel.z += dt * accel.z / mass;
    if (m->thermostat == ANDERSEN) {
        // with Andersen thermostat, the probability of a collision with bath is given as
        // P(t) = v exp(-v t)
        // Giving a proportion of molecules which will have collided in that time.
        // This proportion of atoms are then assigned random velocity based on the temperature.
        // Though v in this case is rate of collision, we reuse 'viscosity'.
        unsigned int k = 0;
        for (k = 0; k < m->n_atoms; k++)
            m->as[k].andersen_f = 0;
        double P = 1000. * viscosity * expl(-viscosity * dt);
        int Pr = (int) P;
        int ran = (rand()%1000);
        //printf("%d, %d -> %s\n", Pr, ran, (ran < Pr)?"yes":"no");
        if (ran < Pr) {
            //printf("%d, %d -> ", Pr, ran);
            set_temp_vel(m, id, T);
        }
        /*while (n_vel > 0) {
            k = (rand()%m->n_atoms);
            if (m->as[k].andersen_f == 1)
                continue;
            n_vel--;
            m->as[k].andersen_f = 1;
            printf("%d %d\n", k, n_vel);
            set_temp_vel(m, k, T);
        }*/
    }
    // amu A ps^-2 * ps / amu = A ps^-1
    return;
}

/* bond_xyz()
 * XYZ files do not contain bond information; this function adds bonds
 * between all atoms where the distance between the atoms is < bl.
 * arg *m: Molecule pointer
 * arg bl: Maximum bond length
 * TODO: Make bl able to vary depending on atoms involved
 *       e.g., C-H bond shorter than C-C.
 */
void bond_xyz(struct Molecule *m, float bl) {
    unsigned int i, j;
    struct Vector temp;
    for (i = 0; i < m->n_atoms; i++) {
        for (j = i+1; j < m->n_atoms; j++) {
            // calculate magnitude of vector joining them
            sub_vector(&(m->as[i].v), &(m->as[j].v), &temp);
            if (magnitude(&temp) < bl) {
                // and if below bl, add a bond.
                add_bond(&(m->as[i]), &(m->as[j]));
                // NOTE(review): %d with unsigned i/j — should be %u
                printf("Bonding %d - %d\n", i, j);
            }
        }
    }
    return;
}

/* read_xyz()
 * Reads xyz file *filename, and generates atomic positions and puts
 * them into molecule *m.
 * arg *m: Molecule pointer
 * arg *filename: Filename.
 * returns: -1 if file does not exist or is wrong format.
 *          1 if okay.
 */
int read_xyz(struct Molecule *m, char *filename) {
    FILE * fp;
    char line[255];
    size_t len=255;
    printf("Reading xyz\n");
    fp = fopen(filename, "r");
    if (fp == NULL) {
        printf("%s not found.\n", filename);
        return -1;
    }
    unsigned int c_line = 0;
    struct Atom *ca;
    while (fgets(line, len, fp)) {
        if (c_line == 0) {
            // first line in an xyz file gives the number of atoms.
            // NOTE(review): n_atoms is unsigned int; %d should be %u.
            int k = sscanf(line, "%d\n", &(m->n_atoms));
            // if there is no number, close and exit.
            if (k != 1) {
                fclose(fp);
                return -1;
            }
            // allocate memory for n_atoms atoms.
            m->as = (struct Atom *) malloc(sizeof(struct Atom) * m->n_atoms);
            if (m->as == NULL)
                crash(m);
        } else if (c_line >= 2) {
            // c_line = 1 is a comment line, which we ignore.
            // if the current atom (c_line - 2) is greater than n_atoms - 1,
            // then exit. eg if there are 3 atoms, then as[0-2].
            // NOTE(review): if n_atoms == 0, n_atoms - 1 wraps to UINT_MAX.
            if (c_line - 2 > m->n_atoms - 1) {
                fclose(fp);
                return -1;
            }
            // ca is pointer to the current atom being operated on.
            ca = &(m->as[c_line - 2]);
            ca->i = c_line-2;
            char *token = strtok(line, " \t");
            int i = 0;
            float x=0, y=0, z=0;
            char name[3];
            // while tokens are left...
            // i gives the column. First column is atom symbol, then x,y,z.
            while (token) {
                switch (i) {
                    // NOTE(review): in printf-family "%2s" the 2 is a MINIMUM
                    // field width, not a limit — a symbol token longer than 2
                    // chars overflows name[3]; "%.2s" was probably intended.
                    case 0: sprintf(name, "%2s", token); break;
                    case 1: x = atof(token); break;
                    case 2: y = atof(token); break;
                    case 3: z = atof(token); break;
                    // if there are more than 4 columns, crash and close.
                    default: fclose(fp); return -1; break;
                }
                i++;
                token = strtok(NULL, " \t");
            }
            // add atom, and print out setup.
            add_atom(ca, x, y, z, name);
            //printf("%s :: (%f, %f, %f) %d\n", ca->name, ca->v.x, ca->v.y, ca->v.z, c_line-2);
        }
        c_line++;
    }
    fclose(fp);
    return 1;
}

/* save_xyz()
 * Outputs *m as an xyz file in *filename. Tab delimited, x y z are of
 * format +00.00000.
 * returns: -1 if file does not open
 *          1 otherwise
 */
int save_xyz(struct Molecule *m, char *filename, char * mode) {
    FILE * fp;
    fp = fopen(filename, mode);
    if (fp == NULL)
        return -1;
    double T = calc_temperature(m);
    // NOTE(review): n_atoms is unsigned int; %d should be %u.
    fprintf(fp, "%d\n%lf\n", m->n_atoms, T);
    unsigned int i;
    for (i = 0; i < m->n_atoms; i++) {
        fprintf(fp, "%2s\t%-2.6f\t%-2.6f\t%-2.6f\n", m->as[i].name, \
            m->as[i].v.x,\
            m->as[i].v.y,\
            m->as[i].v.z);
    }
    fclose(fp);
    return 1;
}

/* print_dir()
 * Prints molecule to screen
 */
int print_dir(struct Molecule *m) {
    unsigned int i;
    for (i = 0; i < m->n_atoms; i++) {
        printf("%d\t%2s\t%-2.6f\t%-2.6f\t%-2.6f\n", i, m->as[i].name, \
            m->as[i].v.x,\
            m->as[i].v.y,\
            m->as[i].v.z);
    }
    return 0;
}

/*int assign_model(struct Molecule *m, char model_name[255]) {
    struct Model mod;
    if (strcmp(model_name, "MM3") == 0)
        setup_model(&mod, MOD_MM3);
    else {
        printf("Model does not exist.\n");
        crash(m);
    }
    m->model = &mod;
    return 1;
}*/

/* Propagation modes */
#define MINIMIZE 0
#define HEAT 1
#define PROD 2

/* add_vector_sc()
 * a += b * dt (scaled in-place accumulate; used for the position update). */
void add_vector_sc(struct Vector *a, struct Vector *b, double dt) {
    a->x += (b->x * dt);
    a->y += (b->y * dt);
    a->z += (b->z * dt);
}

/* propagate()
 * Runs `steps` integration steps, writing an xyz frame to fn every
 * output_step steps. In MINIMIZE mode velocities are zeroed each step
 * (steepest descent); in HEAT mode process_atom is skipped and only the
 * pre-set velocities move the atoms.
 * NOTE(review): if output_step is 0 the `i % output_step` below divides by
 * zero. The parallel loop also calls process_atom, which both uses the
 * non-thread-safe rand() and reads neighbouring positions while other
 * threads update them — a data race.
 */
void propagate(struct Molecule *m, double dn, double dt, unsigned int steps, unsigned int output_step, char fn[500], double temp, double viscosity, int mode) {
    unsigned int i, k;
    //void process_atom(int id, struct Molecule *m, double dn, double dt, double viscosity, double T) {
    if (mode != HEAT)
        save_xyz(m, fn, "w");
    for (i = 0; i < steps; i++) {
        #pragma omp parallel for
        for (k = 0; k < m->n_atoms; k++) {
            if (mode != HEAT)
                process_atom(k, m, dn, dt, viscosity, temp);
            add_vector_sc(&(m->as[k].v), &(m->as[k].vel), dt);
            if (mode == MINIMIZE)
                set_vector(&(m->as[k].vel), 0, 0, 0);
        }
        if (i % output_step == 0)
            save_xyz(m, fn, "a");
    }
}

/* minimize()
 * Steepest-descent minimisation: propagate() in MINIMIZE mode. */
void minimize(struct Molecule *m, double dn, double dt, int steps, int output_step, char fn[500]) {
    propagate(m, dn, dt, steps, output_step, fn, 0, 0, MINIMIZE);
}

/* iterate_once()
 * Single propagation step in HEAT mode (no thermostat forces). */
void iterate_once(struct Molecule *m, double dn, double dt, char fn[500], double temp, double viscosity) {
    propagate(m, dn, dt, 1, 1, fn, temp, viscosity, HEAT);
}

/* heat()
 * Ramps the system from 0 K to `temp` in increments of temp_step: at each
 * temperature, assigns thermal velocities per atom, takes one step, then
 * re-minimises with (dnM, dnT) for `steps` steps. */
void heat(struct Molecule *m, double temp, double temp_step, int steps, double dn, double dt, double dnM, double dnT, double viscosity, char fn[500]) {
    unsigned int i;
    double T = 0;
    for (T = 0; T < temp + temp_step; T += temp_step) {
        for (i = 0; i < m->n_atoms; i++) {
            set_temp_vel(m, i, T);
            iterate_once(m, dn, dt, fn, T, viscosity);
            minimize(m, dnM, dnT, steps, 100, fn);
        }
    }
    return;
}

/* production()
 * Production MD run: propagate() in PROD mode with the chosen thermostat. */
void production(struct Molecule *m, double dn, double dt, int steps, int output_step, char fn[500], double temp, double viscosity) {
    propagate(m, dn, dt, steps, output_step, fn, temp, viscosity, PROD);
}

/* run_script()
 * Runs script in file *filename on molecule *m
 * Commands;
 * - open FILENAME
 *     Opens xyz file at FILENAME
 * - bond BL
 *     Generates bonds between atoms where distance < BL (angstroms)
 * - rotate A B THETA
 *     Rotates atoms connected to B about the A-B axis an amount theta
 *     (radians)
 * - output FILENAME
 *     Writes output as xyz file to FILENAME.
 * returns: -1 if file not available
 *          1 on success
 */
int run_script(char *filename, struct Molecule *m) {
    FILE * fp;
    char line[255];
    size_t len=255;
    fp = fopen(filename, "r");
    if (fp == NULL) {
        printf("%s not found.\n", filename);
        return -1;
    }
    float bl;
    // NOTE(review): "%255s" can write 256 bytes (incl. NUL) into command[255]
    // — off-by-one; the buffer should be 256 bytes.
    char command[255];
    int c;
    struct Vector zero;
    double dn, dt, temp, viscosity, temp_step, dnM, dnT;
    int steps, output_step;
    zero.x = 0;
    zero.y = 0;
    zero.z = 0;
    // NOTE(review): mod is a local; `m->model = &mod` below leaves m->model
    // dangling once run_script returns.
    struct Model mod;
    while (fgets(line, len, fp)) {
        // reset system checks
        reset_check(m);
        // read in line, up to point c
        sscanf(line, "%255s %n", command, &c);
        // '%' starts a comment line
        if (command[0] == '%')
            continue;
        if (strcmp(command, "open") == 0) {
            // if there is no second argument, complain.
            if (sscanf(line+c, "%255s", command) != 1) {
                printf("error reading script\n%s", line);
                break;
            }
            read_xyz(m, command);
        } else if (strcmp(command, "bond") == 0) {
            if (sscanf(line+c, "%f", &bl) != 1) {
                printf("Error reading script\n%s", line);
                break;
            }
            bond_xyz(m, bl);
        } else if (strcmp(command, "output") == 0) {
            if (sscanf(line+c, "%255s", command) != 1) {
                printf("Error reading script\n%s", line);
                break;
            }
            save_xyz(m, command, "w");
        } else if (strcmp(command, "model") == 0) {
            if (sscanf(line + c, "%255s", command) != 1) {
                printf("Error reading script\n%s", line);
                break;
            }
            if (strcmp(command, "GAFF") == 0)
                setup_model(&mod, MOD_GAFF);
            m->model = &mod;
        } else if (strcmp(command, "hybridize") == 0) {
            //printf("%s\n", command);
            // `steps` is reused here as the atom index.
            if (sscanf(line+c, "%d %s", &steps, command) != 2) {
                printf("Error reading script\n%s", line);
                break;
            }
            //printf("%d steps\n", steps);
            if (strcmp(command, "SP") == 0)
                m->as[steps].hybridization = SP1;
            else if (strcmp(command, "SP2") == 0)
                m->as[steps].hybridization = SP2;
            else if (strcmp(command, "SP3") == 0)
                m->as[steps].hybridization = SP3;
            else
                printf("Hybridization not recognised %s\n", command);
        } else if (strcmp(command, "energy") == 0) {
            if (m->model == NULL)
                printf("No model assigned\n");
            else
                printf("Energy: %f kcal/mol\n", calc_energy(m, 0, &zero));
        } else if (strcmp(command, "temperature") == 0) {
            printf("Temperature: %f K\n", calc_temperature(m));
        } else if (strcmp(command, "minimize") == 0) {
            if (sscanf(line + c, "%lf %lf %d %d %s", &dn, &dt, &steps, &output_step, command) != 5) {
                printf("Error reading script\n%s", line);
                break;
            }
            if (steps < 0)
                break;
            minimize(m, dn, dt, steps, output_step, command);
        } else if (strcmp(command, "heat") == 0) {
            if (sscanf(line + c, "%lf %lf %d %lf %lf %lf %lf %lf %s", &temp, &temp_step, &steps, &dn, &dt, &dnM, &dnT, &viscosity, command) != 9) {
                printf("Error reading script\n%s", line);
                break;
            }
            heat(m, temp, temp_step, steps, dn, dt, dnM, dnT, viscosity, command);
        } else if (strcmp(command, "iterate") == 0) {
            if (sscanf(line + c, "%lf %lf %lf %lf %s", &dn, &dt, &temp, &viscosity, command) != 5) {
                printf("Error reading script\n%s", line);
                break;
            }
            if (m->model == NULL) {
                printf("No model assigned.\n");
                break;
            }
            if (m->thermostat == -1) {
                printf("No thermostat assigned.\n");
                break;
            }
            iterate_once(m, dn, dt, command, temp, viscosity);
        } else if (strcmp(command, "thermostat") == 0) {
            if (sscanf(line + c, "%s", command) != 1) {
                printf("Error reading thermostat\n");
                break;
            }
            if (strcmp(command, "LANGEVIN") == 0) {
                printf("Please note the Langevin thermostat is broken.\n");
                m->thermostat = LANGEVIN;
            } else if (strcmp(command, "ANDERSEN") == 0) {
                m->thermostat = ANDERSEN;
            }
        } else if (strcmp(command, "prod") == 0) {
            if (sscanf(line + c, "%lf %lf %lf %lf %d %d %s", &dn, &dt, &temp, &viscosity, &steps, &output_step, command) != 7) {
                printf("Error reading script\n%s", line);
                break;
            }
            if (steps < 0)
                break;
            if (m->model == NULL) {
                printf("No model assigned.\n");
                break;
            }
            if (m->thermostat == -1) {
                printf("No thermostat assigned.\n");
                break;
            }
            production(m, dn, dt, steps, output_step, command, temp, viscosity);
        }
    }
    fclose(fp);
    return 1;
}

/* main()
 * Runs system. If a script is passed, runs the script. Else,
 * enters a do-while loop operating on commands in an interactive manner
 * args SCRIPT: can pass script filename which will be run in.
 */
int main(int argc, char *argv[]) {
    struct Molecule mol;
    mol.n_atoms = 0;
    // NOTE(review): mol.as is never initialised to NULL, so the free(mol.as)
    // at the bottom is undefined behaviour if no file was ever opened.
    mol.model = NULL;
    mol.thermostat = -1;
    // NOTE(review): scanf("%255s", command) below can write 256 bytes into
    // this 255-byte buffer — off-by-one.
    char command[255];
    int n;
    char script_name[255];
    if (argc >= 2) {
        strcpy(script_name, argv[1]);
        run_script(script_name, &mol);
    } else {
        /* Interactive mode commands
         * - open FILENAME
         *     opens xyz file FILENAME
         * - bond BL
         *     sets up bonds between all atoms with distance < BL angstroms
         * - graph N
         *     prints out graph starting at atom N
         * - print
         *     prints out entire system
         * - rotate A B THETA
         *     applies a rotation to atoms connected to B about the A-B axis
         *     an amount theta radians.
         * - output FILENAME
         *     outputs as XYZ file into FILENAME
         * - run SCRIPT
         *     runs script in SCRIPT.
         * - exit
         *     exits program
         */
        do {
            reset_check(&mol);
            printf("> ");
            scanf("%255s", command);
            printf("%s\n", command);
            if (strcmp(command, "open") == 0) {
                scanf("%255s", command);
                read_xyz(&mol, command);
            } else if (strcmp(command, "bond") == 0) {
                printf(" max bond length (ang) > ");
                scanf("%255s", command);
                bond_xyz(&mol, atof(command));
            } else if (strcmp(command, "graph") == 0) {
                printf(" start atom > ");
                if (scanf("%d", &n) == 1)
                    print_moleculef(&(mol.as[n]), 0);
            } else if (strcmp(command, "print") == 0) {
                print_dir(&mol);
            } else if (strcmp(command, "output") == 0) {
                printf(" filename > ");
                if (scanf("%255s", command) != 1)
                    continue;
                save_xyz(&mol, command, "w");
            } else if (strcmp(command, "run") == 0) {
                printf(" script > ");
                if (scanf("%255s", command) != 1)
                    continue;
                run_script(command, &mol);
            }
        } while (strcmp(command, "exit") != 0);
    }
    // clean up everything
    free_atoms(&mol);
    free(mol.as);
    // NOTE(review): returning 1 signals failure to the shell; 0 is the
    // conventional success status.
    return 1;
}
/* paint.c */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % John Cristy % % July 1998 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/string_.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. 
If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. % % o target: the RGB value of the target color. % % o x_offset,y_offset: the starting location of the operation. % % o invert: paint any pixel that does not match the target color. 
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const ChannelType channel,const DrawInfo *draw_info,
  const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
  const MagickBooleanType invert)
{
/*
  Scanline flood fill (classic segment-stack algorithm).  A segment holds a
  horizontal run (x1..x2) on row y1 plus the direction (y2 = +/-1) in which the
  fill is propagating.  MaxStacksize bounds the stack; overflow raises a draw
  exception rather than writing out of bounds.  The macro only pushes segments
  whose destination row (up+delta) lies inside the image.
*/
#define MaxStacksize  (1UL << 15)
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  MagickPixelPacket
    fill,
    pixel;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Set floodfill state.  The floodplane is a scratch copy of the image whose
    opacity channel records which pixels have been visited: a pixel is marked
    by setting its opacity to TransparentOpacity.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
    sizeof(*segment_stack));
  if (segment_stack == (SegmentInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Push initial segment on stack (seed row, plus the row below it).
  */
  exception=(&image->exception);
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  GetMagickPixelPacket(image,&fill);
  GetMagickPixelPacket(image,&pixel);
  image_view=AcquireCacheView(image);
  floodplane_view=AcquireCacheView(floodplane_image);
  while (s > segment_stack)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;  /* intentionally shadows the outer x for this segment's scan */

    register PixelPacket
      *restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: first walk left from x1 marking matching,
      not-yet-visited pixels in the floodplane.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      /*
        TransparentOpacity in the floodplane means "already filled".
      */
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) == invert)
        break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        /*
          The leftward walk extended the run; queue the newly exposed span on
          the opposite row, then resume the rightward scan from x1+1.
        */
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              /*
                Walk right, marking matching pixels until a mismatch or an
                already-visited pixel stops the run.
              */
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
                image->columns-x,1,exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                SetMagickPixelPacket(image,p,indexes+x,&pixel);
                if (IsMagickColorSimilar(&pixel,target) == invert)
                  break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncCacheViewAuthenticPixels(floodplane_view,exception) ==
                  MagickFalse)
                break;
            }
          /*
            Propagate the filled run to the next row, and any right overhang
            back to the previous row.
          */
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Skip over non-matching pixels to find the start of the next
            matching run inside the parent segment.
          */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),
            1,exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            SetMagickPixelPacket(image,p,indexes+x,&pixel);
            if (IsMagickColorSimilar(&pixel,target) != invert)
              break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Tile fill color onto floodplane: every pixel the fill marked
      (non-opaque in the floodplane) gets the draw_info fill color on the
      requested channels.
    */
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetOpacityPixelComponent(p) != OpaqueOpacity)
        {
          /*
            GetFillColor() is per-coordinate so pattern/texture fills work.
          */
          (void) GetFillColor(draw_info,x,y,&fill_color);
          SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
          if (image->colorspace == CMYKColorspace)
            ConvertRGBToCMYK(&fill);
          if ((channel & RedChannel) != 0)
            SetRedPixelComponent(q,ClampToQuantum(fill.red));
          if ((channel & GreenChannel) != 0)
            SetGreenPixelComponent(q,ClampToQuantum(fill.green));
          if ((channel & BlueChannel) != 0)
            SetBluePixelComponent(q,ClampToQuantum(fill.blue));
          if ((channel & OpacityChannel) != 0)
            SetOpacityPixelComponent(q,ClampToQuantum(fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetIndexPixelComponent(indexes+x,ClampToQuantum(fill.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
  floodplane_image=DestroyImage(floodplane_image);
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G r a d i e n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies continuously smooth color transitions along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const PixelPacket *start_color,
%        const PixelPacket *stop_color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o spread: the gradient spread method: pad, reflect, or repeat.
%
%    o start_color: the start color.
%
%    o stop_color: the stop color.
%
%  This provides a good example of making use of the DrawGradientImage
%  function and the gradient structure in draw_info.
%
*/

/*
  Return the larger of two doubles.
*/
static inline double MagickMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}

MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelPacket *start_color,const PixelPacket *stop_color)
{
  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  /*
    Validate arguments and set the gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelPacket *) NULL);
  assert(stop_color != (const PixelPacket *) NULL);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  /*
    Default gradient vector runs corner-to-corner; a linear gradient with a
    vertical component collapses to a purely vertical vector.
  */
  gradient->gradient_vector.x2=(double) image->columns-1.0;
  gradient->gradient_vector.y2=(double) image->rows-1.0;
  if (type == LinearGradient)
    if (gradient->gradient_vector.y2 != 0.0)
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  gradient->radius=MagickMax(gradient->center.x,gradient->center.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops: exactly two, one at each
    end of the vector.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  GetMagickPixelPacket(image,&gradient->stops[0].color);
  GetMagickPixelPacket(image,&gradient->stops[1].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image; drop the matte channel when both end
    colors are fully opaque, and mark the image grayscale when both are gray.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  if (start_color->opacity == OpaqueOpacity)
    if (stop_color->opacity == OpaqueOpacity)
      image->matte=MagickFalse;
  if (IsGrayPixel(start_color) != MagickFalse)
    if (IsGrayPixel(stop_color) != MagickFalse)
      image->type=GrayscaleType;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.  Each pixel is replaced by the most frequent color occurring
%  in a circular region defined by radius.
%
%  The format of the OilPaintImage method is:
%
%      Image *OilPaintImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the circular neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  DestroyHistogramThreadSet() frees the per-thread histogram buffers and the
  holding array.  Safe on a partially-populated set: NULL slots are skipped
  (AcquireHistogramThreadSet zero-fills the array before populating it).
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
  register ssize_t
    i;

  assert(histogram != (size_t **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    if (histogram[i] != (size_t *) NULL)
      histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
  histogram=(size_t **) RelinquishMagickMemory(histogram);
  return(histogram);
}

/*
  AcquireHistogramThreadSet() allocates one histogram of `count' bins per
  OpenMP thread so the parallel loop below needs no locking.  Returns NULL on
  allocation failure (after releasing anything already acquired).
*/
static size_t **AcquireHistogramThreadSet(const size_t count)
{
  register ssize_t
    i;

  size_t
    **histogram,
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  histogram=(size_t **) AcquireQuantumMemory(number_threads,
    sizeof(*histogram));
  if (histogram == (size_t **) NULL)
    return((size_t **) NULL);
  (void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    histogram[i]=(size_t *) AcquireQuantumMemory(count,
      sizeof(**histogram));
    if (histogram[i] == (size_t *) NULL)
      return(DestroyHistogramThreadSet(histogram));
  }
  return(histogram);
}

MagickExport Image *OilPaintImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define NumberPaintBins  256
#define OilPaintImageTag  "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **restrict histograms,
    width;

  ssize_t
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  width=GetOptimalKernelWidth2D(radius,0.5);
  paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (paint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&paint_image->exception);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.  Rows are processed in parallel; each thread uses its
    own intensity histogram (NumberPaintBins = 256 bins, indexed by the 8-bit
    ScaleQuantumToChar() intensity).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireCacheView(image);
  paint_view=AcquireCacheView(paint_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict paint_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    register size_t
      *histogram;

    if (status == MagickFalse)
      continue;
    /*
      Read a width-tall band centered on row y (virtual pixels supply the
      out-of-bounds border).
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view);
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        v;

      /*
        Assign most frequent color: j tracks the offset (from p) of a pixel
        whose intensity bin currently has the highest count; i steps one
        band row at a time through the p buffer.
      */
      i=0;
      j=0;
      count=0;
      (void) ResetMagickMemory(histogram,0,NumberPaintBins*
        sizeof(*histogram));
      for (v=0; v < (ssize_t) width; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          k=(ssize_t) ScaleQuantumToChar(PixelIntensityToQuantum(p+u+i));
          histogram[k]++;
          if (histogram[k] > count)
            {
              j=i+u;
              count=histogram[k];
            }
        }
        i+=(ssize_t) (image->columns+width);
      }
      *q=(*(p+j));
      if (image->colorspace == CMYKColorspace)
        SetIndexPixelComponent(paint_indexes+x,GetIndexPixelComponent(
          indexes+x+j));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_OilPaintImage)
#endif
        proceed=SetImageProgress(image,OilPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histograms=DestroyHistogramThreadSet(histograms);
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);
  return(paint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O p a q u e P a i n t I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpaquePaintImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
% % The format of the OpaquePaintImage method is: % % MagickBooleanType OpaquePaintImage(Image *image, % const PixelPacket *target,const PixelPacket *fill, % const MagickBooleanType invert) % MagickBooleanType OpaquePaintImageChannel(Image *image, % const ChannelType channel,const PixelPacket *target, % const PixelPacket *fill,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o target: the RGB value of the target color. % % o fill: the replacement color. % % o invert: paint any pixel that does not match the target color. % */ MagickExport MagickBooleanType OpaquePaintImage(Image *image, const MagickPixelPacket *target,const MagickPixelPacket *fill, const MagickBooleanType invert) { return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert)); } MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *target, const MagickPixelPacket *fill,const MagickBooleanType invert) { #define OpaquePaintImageTag "Opaque/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(target != (MagickPixelPacket *) NULL); assert(fill != (MagickPixelPacket *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); /* Make image color opaque. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) != invert) { if ((channel & RedChannel) != 0) SetRedPixelComponent(q,ClampToQuantum(fill->red)); if ((channel & GreenChannel) != 0) SetGreenPixelComponent(q,ClampToQuantum(fill->green)); if ((channel & BlueChannel) != 0) SetBluePixelComponent(q,ClampToQuantum(fill->blue)); if ((channel & OpacityChannel) != 0) SetOpacityPixelComponent(q,ClampToQuantum(fill->opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetIndexPixelComponent(indexes+x,ClampToQuantum(fill->index)); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OpaquePaintImageChannel) #endif proceed=SetImageProgress(image,OpaquePaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. % % The format of the TransparentPaintImage method is: % % MagickBooleanType TransparentPaintImage(Image *image, % const MagickPixelPacket *target,const Quantum opacity, % const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o target: the target color. % % o opacity: the replacement opacity value. % % o invert: paint any pixel that does not match the target color. % */ MagickExport MagickBooleanType TransparentPaintImage(Image *image, const MagickPixelPacket *target,const Quantum opacity, const MagickBooleanType invert) { #define TransparentPaintImageTag "Transparent/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(target != (MagickPixelPacket *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Make image color transparent. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); GetMagickPixelPacket(image,&zero); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if (IsMagickColorSimilar(&pixel,target) != invert) q->opacity=opacity; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransparentPaintImage) #endif proceed=SetImageProgress(image,TransparentPaintImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p a r e n t P a i n t I m a g e C h r o m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransparentPaintImageChroma() changes the opacity value associated with any % pixel that matches color to the value defined by opacity. 
%
%  As there is one fuzz value for all the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for similarity of two color components (RGB) can be
%  different.  Thus we define this method to take two target pixels (one
%  low and one high) and all the pixels of an image which lie between
%  these two pixels are made transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent: rows are processed in parallel; the
    opacity of every pixel whose red, green, and blue all fall inside the
    [low,high] box (or outside it, when invert is set) is replaced.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      i;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (i=0; i < (ssize_t) image->columns; i++)
    {
      SetMagickPixelPacket(image,q,indexes+i,&pixel);
      match=MagickTrue;
      if ((pixel.red < low->red) || (pixel.red > high->red))
        match=MagickFalse;
      if ((pixel.green < low->green) || (pixel.green > high->green))
        match=MagickFalse;
      if ((pixel.blue < low->blue) || (pixel.blue > high->blue))
        match=MagickFalse;
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/* ===== convolution_sgemm.h (ncnn, C++ with AArch64 NEON) — concatenated below ===== */
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack convolution weights for the im2col+sgemm path: output channels are
// interleaved in groups of 8 (aarch64 only), then 4, then singly, so the gemm
// kernel can load one contiguous strip per group.  Channel layout of
// kernel_tm: outch/8 packed-8 blocks, then (outch%8)/4 packed-4 blocks, then
// outch%4 single rows (4-wide packing only on non-aarch64 builds).
static void conv_im2col_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_size)
{
    const float* kernel = _kernel;

#if __ARM_NEON && __aarch64__
    // kernel memory packed 8 x 8
    kernel_tm.create(8*kernel_size, inch, outch/8 + (outch%8)/4 + outch%4);
#else
    // kernel memory packed 4 x 8
    kernel_tm.create(4*kernel_size, inch, outch/4 + outch%4);
#endif

    int nn_outch = 0;
    int remain_outch_start = 0;

#if __ARM_NEON && __aarch64__
    nn_outch = outch >> 3;
    remain_outch_start = nn_outch << 3;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 8;

        // eight consecutive output-channel weight rows, interleaved 8-wide
        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;
        const float* k4 = kernel + (p+4)*inch*kernel_size;
        const float* k5 = kernel + (p+5)*inch*kernel_size;
        const float* k6 = kernel + (p+6)*inch*kernel_size;
        const float* k7 = kernel + (p+7)*inch*kernel_size;

        float* ktmp = kernel_tm.channel(p/8);

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }
#endif

    // remaining channels, interleaved 4-wide
    nn_outch = (outch - remain_outch_start) >> 2;

    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float* k0 = kernel + (p+0)*inch*kernel_size;
        const float* k1 = kernel + (p+1)*inch*kernel_size;
        const float* k2 = kernel + (p+2)*inch*kernel_size;
        const float* k3 = kernel + (p+3)*inch*kernel_size;

#if __ARM_NEON && __aarch64__
        // 4-wide blocks follow the 8-wide blocks in kernel_tm
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4);
#else
        float* ktmp = kernel_tm.channel(p/4);
#endif // __ARM_NEON && __aarch64__

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    remain_outch_start += nn_outch << 2;

    // leftover channels, one at a time (no interleave)
    for (int p=remain_outch_start; p<outch; p++)
    {
        const float* k0 = kernel + (p+0)*inch*kernel_size;

#if __ARM_NEON && __aarch64__
        float* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4);
#else
        float* ktmp = kernel_tm.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__

        for (int q=0; q<inch*kernel_size; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

// im2col + sgemm convolution.  (Definition continues beyond this chunk.)
static void conv_im2col_sgemm_neon(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, \
        const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    size_t elemsize = bottom_blob.elemsize;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // im2col: unfold each kernel_h x kernel_w input patch into a column, one
    // input channel at a time (rows of bottom_im2col are kernel positions).
    Mat bottom_im2col(outw*outh, kernel_h*kernel_w*inch, elemsize, opt.workspace_allocator);
    {
        const int stride = kernel_h*kernel_w*outw*outh;
        float* ret = (float*)bottom_im2col;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p=0; p<inch; p++)
        {
            const float* input = bottom_blob.channel(p);
            int retID = stride * p;
            for (int u=0; u<kernel_h; u++)
            {
                for (int v=0; v<kernel_w; v++)
                {
                    for (int i=0; i<outh; i++)
                    {
                        for (int j=0; j<outw; j++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
} } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // bottom_im2col memory packed 8 x 8 Mat bottom_tm(8*kernel_size, inch, out_size/8 + out_size%8, elemsize, opt.workspace_allocator); { int nn_size = out_size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8); for (int q=0; q<inch*kernel_size; q++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1] \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "cc", "memory", "v0", "v1" ); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0] \n" "vst1.f32 {d0-d3}, [%1] \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; #endif // __ARM_NEON tmpptr += 8; img0 += out_size; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<out_size; i++) { const float* img0 = bottom_im2col.channel(0); img0 += i; float* tmpptr = bottom_tm.channel(i/8 + i%8); for (int q=0; q<inch*kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += out_size; } } } // sgemm(int M, int N, int L, float* A, float* B, float* C) { //int M = outch; // outch int N = outw * outh; // outsize or out stride int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 8; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* 
output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); float* output4 = top_blob.channel(i+4); float* output5 = top_blob.channel(i+5); float* output6 = top_blob.channel(i+6); float* output7 = top_blob.channel(i+7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); const float* va = kernel_tm.channel(i/8); #if __ARM_NEON asm volatile( "ld1 {v0.4s, v1.4s}, [%21] \n" "dup v16.4s, v0.s[0] \n"// sum0 "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n"// sum1 "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n"// sum2 "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n"// sum3 "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n"// sum4 "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n"// sum5 "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n"// sum6 "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n"// sum7 "dup v31.4s, v1.s[3] \n" "lsr w4, %w20, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" // kernel "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "fmla v24.4s, v8.4s, v1.s[0] \n"// sum4 += (a00-a70) * k40 "fmla v25.4s, v9.4s, v1.s[0] \n"// "fmla v26.4s, v8.4s, v1.s[1] \n"// sum5 += (a00-a70) * k50 "fmla v27.4s, v9.4s, v1.s[1] \n"// "fmla v28.4s, v8.4s, v1.s[2] \n"// sum6 += (a00-a70) * k60 "fmla 
v29.4s, v9.4s, v1.s[2] \n"// "fmla v30.4s, v8.4s, v1.s[3] \n"// sum7 += (a00-a70) * k70 "fmla v31.4s, v9.4s, v1.s[3] \n"// // k1 "fmla v16.4s, v10.4s, v2.s[0] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v2.s[0] \n"// "fmla v18.4s, v10.4s, v2.s[1] \n"// sum1 += (a01-a71) * k11 "fmla v19.4s, v11.4s, v2.s[1] \n"// "fmla v20.4s, v10.4s, v2.s[2] \n"// sum2 += (a01-a71) * k21 "fmla v21.4s, v11.4s, v2.s[2] \n"// "fmla v22.4s, v10.4s, v2.s[3] \n"// sum3 += (a01-a71) * k31 "fmla v23.4s, v11.4s, v2.s[3] \n"// "fmla v24.4s, v10.4s, v3.s[0] \n"// sum4 += (a01-a71) * k41 "fmla v25.4s, v11.4s, v3.s[0] \n"// "fmla v26.4s, v10.4s, v3.s[1] \n"// sum5 += (a01-a71) * k51 "fmla v27.4s, v11.4s, v3.s[1] \n"// "fmla v28.4s, v10.4s, v3.s[2] \n"// sum6 += (a01-a71) * k61 "fmla v29.4s, v11.4s, v3.s[2] \n"// "fmla v30.4s, v10.4s, v3.s[3] \n"// sum7 += (a01-a71) * k71 "fmla v31.4s, v11.4s, v3.s[3] \n"// // k2 "fmla v16.4s, v12.4s, v4.s[0] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v4.s[0] \n"// "fmla v18.4s, v12.4s, v4.s[1] \n"// sum1 += (a02-a72) * k12 "fmla v19.4s, v13.4s, v4.s[1] \n"// "fmla v20.4s, v12.4s, v4.s[2] \n"// sum2 += (a02-a72) * k22 "fmla v21.4s, v13.4s, v4.s[2] \n"// "fmla v22.4s, v12.4s, v4.s[3] \n"// sum3 += (a02-a72) * k32 "fmla v23.4s, v13.4s, v4.s[3] \n"// "fmla v24.4s, v12.4s, v5.s[0] \n"// sum4 += (a02-a72) * k42 "fmla v25.4s, v13.4s, v5.s[0] \n"// "fmla v26.4s, v12.4s, v5.s[1] \n"// sum5 += (a02-a72) * k52 "fmla v27.4s, v13.4s, v5.s[1] \n"// "fmla v28.4s, v12.4s, v5.s[2] \n"// sum6 += (a02-a72) * k62 "fmla v29.4s, v13.4s, v5.s[2] \n"// "fmla v30.4s, v12.4s, v5.s[3] \n"// sum7 += (a02-a72) * k72 "fmla v31.4s, v13.4s, v5.s[3] \n"// // k3 "fmla v16.4s, v14.4s, v6.s[0] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v6.s[0] \n"// "fmla v18.4s, v14.4s, v6.s[1] \n"// sum1 += (a03-a73) * k13 "fmla v19.4s, v15.4s, v6.s[1] \n"// "fmla v20.4s, v14.4s, v6.s[2] \n"// sum2 += (a03-a73) * k23 "fmla v21.4s, v15.4s, v6.s[2] \n"// "fmla v22.4s, v14.4s, v6.s[3] 
\n"// sum3 += (a03-a73) * k33 "fmla v23.4s, v15.4s, v6.s[3] \n"// "fmla v24.4s, v14.4s, v7.s[0] \n"// sum4 += (a03-a73) * k43 "fmla v25.4s, v15.4s, v7.s[0] \n"// "fmla v26.4s, v14.4s, v7.s[1] \n"// sum5 += (a03-a73) * k53 "fmla v27.4s, v15.4s, v7.s[1] \n"// "fmla v28.4s, v14.4s, v7.s[2] \n"// sum6 += (a03-a73) * k63 "fmla v29.4s, v15.4s, v7.s[2] \n"// "fmla v30.4s, v14.4s, v7.s[3] \n"// sum7 += (a03-a73) * k73 "fmla v31.4s, v15.4s, v7.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "fmla v24.4s, v8.4s, v1.s[0] \n"// sum4 += (a00-a70) * k40 "fmla v25.4s, v9.4s, v1.s[0] \n"// "fmla v26.4s, v8.4s, v1.s[1] \n"// sum5 += (a00-a70) * k50 "fmla v27.4s, v9.4s, v1.s[1] \n"// "fmla v28.4s, v8.4s, v1.s[2] \n"// sum6 += (a00-a70) * k60 "fmla v29.4s, v9.4s, v1.s[2] \n"// "fmla v30.4s, v8.4s, v1.s[3] \n"// sum7 += (a00-a70) * k70 "fmla v31.4s, v9.4s, v1.s[3] \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" "st1 {v18.4s, v19.4s}, [%1] \n" "st1 {v20.4s, v21.4s}, [%2] \n" "st1 {v22.4s, v23.4s}, [%3] \n" "st1 {v24.4s, v25.4s}, [%4] \n" "st1 {v26.4s, v27.4s}, [%5] \n" "st1 {v28.4s, v29.4s}, [%6] \n" "st1 {v30.4s, v31.4s}, [%7] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(output4), // %4 "=r"(output5), // %5 "=r"(output6), // %6 "=r"(output7), // %7 "=r"(vb), // %8 "=r"(va) // %9 : "0"(output0), 
"1"(output1), "2"(output2), "3"(output3), "4"(output4), "5"(output5), "6"(output6), "7"(output7), "8"(vb), "9"(va), "r"(L), // %20 "r"(biasptr) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; va += 8; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; sum4[n] += va[4] * vb[n+8]; sum5[n] += va[5] * vb[n+8]; sum6[n] += va[6] * vb[n+8]; sum7[n] += va[7] * vb[n+8]; va += 8; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; sum4[n] += va[4] * vb[n+16]; sum5[n] += va[5] * vb[n+16]; sum6[n] += va[6] * vb[n+16]; sum7[n] += va[7] * vb[n+16]; va += 8; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; sum4[n] += va[4] * vb[n+24]; sum5[n] += va[5] * vb[n+24]; sum6[n] += va[6] * vb[n+24]; sum7[n] += va[7] * vb[n+24]; va += 8; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; sum4[n] += va[4] * vb[n+32]; sum5[n] += va[5] * vb[n+32]; sum6[n] += va[6] * vb[n+32]; sum7[n] += va[7] * vb[n+32]; va += 8; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; sum4[n] += va[4] * vb[n+40]; sum5[n] += va[5] * vb[n+40]; sum6[n] += va[6] * vb[n+40]; 
sum7[n] += va[7] * vb[n+40]; va += 8; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; sum4[n] += va[4] * vb[n+48]; sum5[n] += va[5] * vb[n+48]; sum6[n] += va[6] * vb[n+48]; sum7[n] += va[7] * vb[n+48]; va += 8; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; sum3[n] += va[3] * vb[n+56]; sum4[n] += va[4] * vb[n+56]; sum5[n] += va[5] * vb[n+56]; sum6[n] += va[6] * vb[n+56]; sum7[n] += va[7] * vb[n+56]; va -= 56; } va += 64; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; output4[n] = sum4[n] + biasptr[4]; output5[n] = sum5[n] + biasptr[5]; output6[n] = sum6[n] + biasptr[6]; output7[n] = sum7[n] + biasptr[7]; } #endif // __ARM_NEON output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); const float* va = kernel_tm.channel(i/8); #if __ARM_NEON asm volatile( "ld1 {v14.4s, v15.4s}, [%21] \n" // sum0_7 inital with bias "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "eor v20.16b, v20.16b, v20.16b \n" // sum4 "eor v21.16b, v21.16b, v21.16b \n" // sum5 "eor v22.16b, v22.16b, v22.16b \n" // sum6 "eor v23.16b, v23.16b, v23.16b \n" // sum7 "lsr w4, %w20, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" // k "ld1 
{v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" // d // k0 "fmla v16.4s, v0.4s, v8.s[0] \n"// sum0 += (k00-k70) * a00 "fmla v17.4s, v1.4s, v8.s[0] \n"// "fmla v18.4s, v2.4s, v8.s[1] \n"// sum1 += (k01-k71) * a10 "fmla v19.4s, v3.4s, v8.s[1] \n"// "fmla v20.4s, v4.4s, v8.s[2] \n"// sum2 += (k02-k72) * a20 "fmla v21.4s, v5.4s, v8.s[2] \n"// "fmla v22.4s, v6.4s, v8.s[3] \n"// sum3 += (k03-k73) * a30 "fmla v23.4s, v7.4s, v8.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v14.4s, v14.4s, v16.4s \n" "fadd v15.4s, v15.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "prfm pldl1keep, [%8, #32] \n" "ld1r {v8.4s}, [%8], #4 \n" // k0 "fmla v14.4s, v8.4s, v0.4s \n"// sum0 += (k00-k70) * a00 "fmla v15.4s, v8.4s, v1.4s \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v14.s}[0], [%0] \n" "st1 {v14.s}[1], [%1] \n" "st1 {v14.s}[2], [%2] \n" "st1 {v14.s}[3], [%3] \n" "st1 {v15.s}[0], [%4] \n" "st1 {v15.s}[1], [%5] \n" "st1 {v15.s}[2], [%6] \n" "st1 {v15.s}[3], [%7] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(output4), // %4 "=r"(output5), // %5 "=r"(output6), // %6 "=r"(output7), // %7 "=r"(vb), // %8 "=r"(va) // %9 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(output4), "5"(output5), "6"(output6), "7"(output7), "8"(vb), "9"(va), "r"(L), // %20 "r"(biasptr) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; 
float sum3 = biasptr[3]; float sum4 = biasptr[4]; float sum5 = biasptr[5]; float sum6 = biasptr[6]; float sum7 = biasptr[7]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __ARM_NEON output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } #endif // __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i+1); float* output2 = top_blob.channel(i+2); float* output3 = top_blob.channel(i+3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + i : zeros; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4); #else const float* va = kernel_tm.channel(i/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%13] \n" "dup v16.4s, v0.s[0] \n"// sum0 "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n"// sum1 "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n"// sum2 "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n"// sum3 "dup v23.4s, v0.s[3] \n" "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" // kernel "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" "subs w4, w4, #1 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// // k1 "fmla v16.4s, v10.4s, v1.s[0] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v1.s[0] \n"// "fmla v18.4s, v10.4s, v1.s[1] \n"// sum1 += (a01-a71) * k11 "fmla v19.4s, v11.4s, v1.s[1] \n"// "fmla v20.4s, v10.4s, v1.s[2] \n"// sum2 += (a01-a71) * k21 "fmla v21.4s, v11.4s, v1.s[2] \n"// "fmla v22.4s, v10.4s, v1.s[3] \n"// sum3 += (a01-a71) * k31 "fmla v23.4s, v11.4s, v1.s[3] \n"// // k2 "fmla v16.4s, v12.4s, v2.s[0] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v2.s[0] \n"// "fmla v18.4s, v12.4s, v2.s[1] \n"// sum1 += (a02-a72) * k12 "fmla v19.4s, v13.4s, v2.s[1] \n"// "fmla v20.4s, v12.4s, v2.s[2] \n"// sum2 += (a02-a72) * k22 "fmla v21.4s, v13.4s, v2.s[2] \n"// "fmla v22.4s, v12.4s, 
v2.s[3] \n"// sum3 += (a02-a72) * k32 "fmla v23.4s, v13.4s, v2.s[3] \n"// // k3 "fmla v16.4s, v14.4s, v3.s[0] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v3.s[0] \n"// "fmla v18.4s, v14.4s, v3.s[1] \n"// sum1 += (a03-a73) * k13 "fmla v19.4s, v15.4s, v3.s[1] \n"// "fmla v20.4s, v14.4s, v3.s[2] \n"// sum2 += (a03-a73) * k23 "fmla v21.4s, v15.4s, v3.s[2] \n"// "fmla v22.4s, v14.4s, v3.s[3] \n"// sum3 += (a03-a73) * k33 "fmla v23.4s, v15.4s, v3.s[3] \n"// "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4s, v9.4s}, [%4], #32 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// "fmla v18.4s, v8.4s, v0.s[1] \n"// sum1 += (a00-a70) * k10 "fmla v19.4s, v9.4s, v0.s[1] \n"// "fmla v20.4s, v8.4s, v0.s[2] \n"// sum2 += (a00-a70) * k20 "fmla v21.4s, v9.4s, v0.s[2] \n"// "fmla v22.4s, v8.4s, v0.s[3] \n"// sum3 += (a00-a70) * k30 "fmla v23.4s, v9.4s, v0.s[3] \n"// "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" "st1 {v18.4s, v19.4s}, [%1] \n" "st1 {v20.4s, v21.4s}, [%2] \n" "st1 {v22.4s, v23.4s}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else asm volatile( "vld1.f32 {d0-d1}, [%13] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 q15, d1[1] \n" "lsr r4, %12, #2 \n"// r4 = nn = L >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// 
for(; nn != 0; nn--) "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// kernel "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n"// data "vmla.f32 q8, q4, d0[0] \n"// sum0 = (a00-a07) * k00 "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n"// sum1 = (a00-a07) * k10 "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n"// sum2 = (a00-a07) * k20 "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n"// sum3 = (a00-a07) * k30 "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n"// sum0 += (a10-a17) * k01 "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n"// sum1 += (a10-a17) * k11 "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n"// sum2 += (a10-a17) * k21 "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n"// sum3 += (a10-a17) * k31 "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n"// data "vmla.f32 q8, q4, d4[0] \n"// sum0 += (a20-a27) * k02 "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n"// sum1 += (a20-a27) * k12 "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n"// sum2 += (a20-a27) * k22 "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n"// sum3 += (a20-a27) * k32 "vmla.f32 q15, q5, d5[1] \n" "vmla.f32 q8, q6, d6[0] \n"// sum0 += (a30-a37) * k03 "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n"// sum1 += (a30-a37) * k13 "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n"// sum2 += (a30-a37) * k23 "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n"// sum3 += (a30-a37) * k33 "vmla.f32 q15, q7, d7[1] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4]! 
\n" "vmla.f32 q8, q4, d0[0] \n"// sum0 += (a00-a70) * k00 "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n"// sum1 += (a00-a70) * k10 "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n"// sum2 += (a00-a70) * k20 "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n"// sum3 += (a00-a70) * k30 "vmla.f32 q15, q5, d1[1] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.f32 {d16-d19}, [%0] \n" "vst1.f32 {d20-d23}, [%1] \n" "vst1.f32 {d24-d27}, [%2] \n" "vst1.f32 {d28-d31}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; va += 4; sum0[n] += va[0] * vb[n+8]; sum1[n] += va[1] * vb[n+8]; sum2[n] += va[2] * vb[n+8]; sum3[n] += va[3] * vb[n+8]; va += 4; sum0[n] += va[0] * vb[n+16]; sum1[n] += va[1] * vb[n+16]; sum2[n] += va[2] * vb[n+16]; sum3[n] += va[3] * vb[n+16]; va += 4; sum0[n] += va[0] * vb[n+24]; sum1[n] += va[1] * vb[n+24]; sum2[n] += va[2] * vb[n+24]; sum3[n] += va[3] * vb[n+24]; va += 4; sum0[n] += va[0] * vb[n+32]; sum1[n] += va[1] * vb[n+32]; sum2[n] += va[2] * vb[n+32]; sum3[n] += va[3] * vb[n+32]; va += 4; sum0[n] += va[0] * vb[n+40]; sum1[n] += va[1] * vb[n+40]; sum2[n] += va[2] * vb[n+40]; sum3[n] += va[3] * vb[n+40]; va += 4; sum0[n] += va[0] * vb[n+48]; sum1[n] += va[1] * vb[n+48]; sum2[n] += va[2] * vb[n+48]; sum3[n] += va[3] * vb[n+48]; va += 4; sum0[n] += va[0] * vb[n+56]; sum1[n] += va[1] * vb[n+56]; sum2[n] += va[2] * vb[n+56]; 
sum3[n] += va[3] * vb[n+56]; va -= 28; } va += 32; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n=0; n<8; n++) { output0[n] = sum0[n] + biasptr[0]; output1[n] = sum1[n] + biasptr[1]; output2[n] = sum2[n] + biasptr[2]; output3[n] = sum3[n] + biasptr[3]; } #endif // __ARM_NEON output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j<N; j++) { float* vb = bottom_tm.channel(j/8 + j%8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4); #else const float* va = kernel_tm.channel(i/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v14.4s}, [%13] \n" // sum0_3 inital with bias "lsr w4, %w12, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" // k "prfm pldl1keep, [%4, #128] \n" "ld1 {v8.4s}, [%4], #16 \n" // d "subs w4, w4, #1 \n" "fmla v16.4s, v0.4s, v8.s[0] \n"// sum0 += (k00-k30) * a00 "fmla v17.4s, v1.4s, v8.s[1] \n"// sum1 += (k01-k31) * a10 "fmla v18.4s, v2.4s, v8.s[2] \n"// sum2 += (k02-k32) * a20 "fmla v19.4s, v3.4s, v8.s[3] \n"// sum3 += (k03-k33) * a30 "bne 0b \n" "add v16.4s, v16.4s, v18.4s \n" "add v17.4s, v17.4s, v19.4s \n" "add v14.4s, v16.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%4, #32] \n" "ld1r {v8.4s}, [%4], #4 \n" "subs w4, w4, #1 \n" // k0 "fmla v14.4s, v8.4s, v0.4s \n"// sum0 += (k00-k30) * a00 "bne 2b \n" "3: \n" "st1 {v14.s}[0], [%0] \n" "st1 {v14.s}[1], [%1] \n" "st1 {v14.s}[2], [%2] 
\n" "st1 {v14.s}[3], [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else asm volatile( // inch loop "vld1.f32 {d24-d25}, [%13] \n" "lsr r4, %12, #2 \n"// r4 = nn = L >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n"// for(; nn != 0; nn--) "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n"// kernel "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4]! \n"// data "vmla.f32 q8, q0, d8[0] \n"// (k00-k30) * a00 "vmla.f32 q9, q1, d8[1] \n"// (k01-k31) * a01 "vmla.f32 q10, q2, d9[0] \n"// (k02-k32) * a02 "vmla.f32 q11, q3, d9[1] \n"// (k03-k33) * a03 "subs r4, r4, #1 \n" "bne 0b \n"// end for "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%4, #32] \n" "vld1.f32 {d8[],d9[]}, [%4]! 
\n" "subs r4, r4, #1 \n" "vmla.f32 q12, q0, q4 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.f32 {d24[0]}, [%0] \n" "vst1.f32 {d24[1]}, [%1] \n" "vst1.f32 {d25[0]}, [%2] \n" "vst1.f32 {d25[1]}, [%3] \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(output2), // %2 "=r"(output3), // %3 "=r"(vb), // %4 "=r"(va) // %5 : "0"(output0), "1"(output1), "2"(output2), "3"(output3), "4"(vb), "5"(va), "r"(L), // %12 "r"(biasptr) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12" ); #endif // __aarch64__ #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int k=0; k<L; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __ARM_NEON output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_outch_start; i<outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; int j=0; for (; j+7<N; j=j+8) { const float* vb = bottom_tm.channel(j/8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #else const float* va = kernel_tm.channel(i/4 + i%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v16.4s, %w7 \n" // sum0 "dup v17.4s, %w7 \n" // sum0n "lsr w4, %w6, #2 \n"// r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n"// for (; k+3<L; k=k+4) "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" // data "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" // k0 "fmla v16.4s, v8.4s, v0.s[0] \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v9.4s, v0.s[0] \n"// // k1 "fmla v16.4s, v10.4s, v0.s[1] \n"// sum0 += (a01-a71) * k01 "fmla v17.4s, v11.4s, v0.s[1] \n"// // k2 "fmla v16.4s, v12.4s, v0.s[2] \n"// sum0 += (a02-a72) * k02 "fmla v17.4s, v13.4s, v0.s[2] \n"// // k3 "fmla v16.4s, v14.4s, v0.s[3] \n"// sum0 += (a03-a73) * k03 "fmla v17.4s, v15.4s, v0.s[3] \n"// "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1], #32 \n" "subs w4, w4, #1 \n" // k0 "fmla v16.4s, v0.4s, v8.4s \n"// sum0 += (a00-a70) * k00 "fmla v17.4s, v0.4s, v9.4s \n"// "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0] \n" : "=r"(output), // %0 "=r"(vb), // %1 "=r"(va) // %2 : "0"(output), "1"(vb), "2"(va), "r"(L), // %6 "r"(bias0) // %7 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17" ); #else asm volatile( "vdup.f32 q8, %7 \n" "vdup.f32 q9, %7 \n" // inch loop "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" "pld [%2, #128] \n" "vld1.f32 
{d0-d1}, [%2]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0] \n" : "=r"(output), // %0 "=r"(vb), // %1 "=r"(va) // %2 : "0"(output), "1"(vb), "2"(va), "r"(L), // %6 "r"(bias0) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else float sum[8] = {0}; int k=0; for (; k+7<L; k=k+8) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; sum[n] += va[1] * vb[n+8]; sum[n] += va[2] * vb[n+16]; sum[n] += va[3] * vb[n+24]; sum[n] += va[4] * vb[n+32]; sum[n] += va[5] * vb[n+40]; sum[n] += va[6] * vb[n+48]; sum[n] += va[7] * vb[n+56]; } va += 8; vb += 64; } for (; k<L; k++) { for (int n=0; n<8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n=0; n<8; n++) { output[n] = sum[n] + bias0; } #endif // __ARM_NEON output += 8; } for (; j<N; j++) { const float* vb = bottom_tm.channel(j/8 + j%8); #if __ARM_NEON && __aarch64__ const float* va = kernel_tm.channel(i/8 + (i%8)/4 + i%4); #else const float* va = kernel_tm.channel(i/4 + i%4); #endif // __ARM_NEON && __aarch64__ int k=0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; k+3<L; k+=4) { float32x4_t _p0 = vld1q_f32(vb); vb += 4; float32x4_t _k0 = vld1q_f32(va); va += 4; #if __aarch64__ _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __aarch64__ float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = 
vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif #else float sum0 = bias0; #endif // __ARM_NEON for (; k<L; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } }
reciprocal_to_normal.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdlib.h>
#include <math.h>
#include "reciprocal_to_normal.h"
#include "lapack_wrapper.h"

#ifdef MEASURE_R2N
#include <unistd.h>
#include <time.h>
#endif

/* Contract the reciprocal-space fc3 tensor with three phonon eigenvectors
 * (one per q-point) and the inverse-sqrt mass factors, accumulating a single
 * complex amplitude over all atom triplets and Cartesian components.
 * Defined at the bottom of this file. */
static lapack_complex_double fc3_sum_in_reciprocal_to_normal
(const long bi0,
 const long bi1,
 const long bi2,
 const lapack_complex_double *eigvecs0,
 const lapack_complex_double *eigvecs1,
 const lapack_complex_double *eigvecs2,
 const lapack_complex_double *fc3_reciprocal,
 const double *masses,
 const long num_atom);

/* |amplitude|^2 / (w0 * w1 * w2) for one (bi, j, k) band triplet, or 0 when
 * either of the two frequencies freqs1[j], freqs2[k] is at or below
 * cutoff_frequency. */
static double get_fc3_sum
(const long j,
 const long k,
 const long bi,
 const double *freqs0,
 const double *freqs1,
 const double *freqs2,
 const lapack_complex_double *eigvecs0,
 const lapack_complex_double *eigvecs1,
 const lapack_complex_double *eigvecs2,
 const lapack_complex_double *fc3_reciprocal,
 const double *masses,
 const long num_atom,
 const double cutoff_frequency);

/* Fill fc3_normal_squared with squared three-phonon matrix elements.
 *
 * g_pos:  num_g_pos rows of (band0 index into band_indices, band1, band2,
 *         output offset into fc3_normal_squared).
 * freqs0/1/2, eigvecs0/1/2: phonon frequencies and eigenvectors of the three
 *         interacting q-points.
 * Entries whose first-phonon frequency is at or below cutoff_frequency are
 * left untouched (callers are expected to have zero-initialized the output).
 * num_band0 is unused in this body; presumably kept for API symmetry with
 * related routines -- TODO confirm against callers.
 * When openmp_at_bands is non-zero the loop over g_pos runs in parallel;
 * each iteration writes a distinct g_pos[i][3] slot (no write conflicts as
 * long as offsets are unique -- assumed, not checked here). */
void reciprocal_to_normal_squared
(double *fc3_normal_squared,
 const long (*g_pos)[4],
 const long num_g_pos,
 const lapack_complex_double *fc3_reciprocal,
 const double *freqs0,
 const double *freqs1,
 const double *freqs2,
 const lapack_complex_double *eigvecs0,
 const lapack_complex_double *eigvecs1,
 const lapack_complex_double *eigvecs2,
 const double *masses,
 const long *band_indices,
 const long num_band0,
 const long num_band,
 const double cutoff_frequency,
 const long openmp_at_bands)
{
  long i, num_atom;

#ifdef MEASURE_R2N
  double loopTotalCPUTime, loopTotalWallTime;
  time_t loopStartWallTime;
  clock_t loopStartCPUTime;
#endif

  /* Three Cartesian components per atom. */
  num_atom = num_band / 3;

#ifdef MEASURE_R2N
  loopStartWallTime = time(NULL);
  loopStartCPUTime = clock();
#endif

#pragma omp parallel for if (openmp_at_bands)
  for (i = 0; i < num_g_pos; i++) {
    /* Skip acoustic / imaginary modes of the first phonon. */
    if (freqs0[band_indices[g_pos[i][0]]] > cutoff_frequency) {
      fc3_normal_squared[g_pos[i][3]] =
        get_fc3_sum(g_pos[i][1],
                    g_pos[i][2],
                    band_indices[g_pos[i][0]],
                    freqs0,
                    freqs1,
                    freqs2,
                    eigvecs0,
                    eigvecs1,
                    eigvecs2,
                    fc3_reciprocal,
                    masses,
                    num_atom,
                    cutoff_frequency);
    }
  }

#ifdef MEASURE_R2N
  loopTotalCPUTime = (double)(clock() - loopStartCPUTime) / CLOCKS_PER_SEC;
  loopTotalWallTime = difftime(time(NULL), loopStartWallTime);
  printf(" %1.3fs (%1.3fs CPU)\n", loopTotalWallTime, loopTotalCPUTime);
#endif
}

static double get_fc3_sum
(const long j,
 const long k,
 const long bi,
 const double *freqs0,
 const double *freqs1,
 const double *freqs2,
 const lapack_complex_double *eigvecs0,
 const lapack_complex_double *eigvecs1,
 const lapack_complex_double *eigvecs2,
 const lapack_complex_double *fc3_reciprocal,
 const double *masses,
 const long num_atom,
 const double cutoff_frequency)
{
  double fff, sum_real, sum_imag;
  lapack_complex_double fc3_sum;

  if (freqs1[j] > cutoff_frequency && freqs2[k] > cutoff_frequency) {
    /* Frequency product used to normalize the squared amplitude. */
    fff = freqs0[bi] * freqs1[j] * freqs2[k];
    fc3_sum = fc3_sum_in_reciprocal_to_normal
      (bi, j, k,
       eigvecs0, eigvecs1, eigvecs2,
       fc3_reciprocal,
       masses,
       num_atom);
    sum_real = lapack_complex_double_real(fc3_sum);
    sum_imag = lapack_complex_double_imag(fc3_sum);
    /* |fc3_sum|^2 / (w0 w1 w2) */
    return (sum_real * sum_real + sum_imag * sum_imag) / fff;
  } else {
    return 0;
  }
}

static lapack_complex_double fc3_sum_in_reciprocal_to_normal
(const long bi0,
 const long bi1,
 const long bi2,
 const lapack_complex_double *eigvecs0,
 const lapack_complex_double *eigvecs1,
 const lapack_complex_double *eigvecs2,
 const lapack_complex_double *fc3_reciprocal,
 const double *masses,
 const long num_atom)
{
  long baseIndex, index_l, index_lm, i, j, k, l, m, n;
  double sum_real, sum_imag, mmm, mass_l, mass_lm;
  lapack_complex_double eig_prod, eig_prod1;

  sum_real = 0;
  sum_imag = 0;

  /* fc3_reciprocal is laid out as [l][m][n][i][j][k] with l,m,n atom
   * indices and i,j,k Cartesian components (27 = 3*3*3 doubles per atom
   * triplet); eigvecs are indexed as [(atom*3 + cart) * num_band + band]. */
  for (l = 0; l < num_atom; l++) {
    mass_l = masses[l];
    index_l = l * num_atom * num_atom * 27;
    for (m = 0; m < num_atom; m++) {
      mass_lm = mass_l * masses[m];
      index_lm = index_l + m * num_atom * 27;
      for (i = 0; i < 3; i++) {
        for (j = 0; j < 3; j++) {
          /* Hoisted product of the first two eigenvector components;
           * invariant over the n and k loops below. */
          eig_prod1 = phonoc_complex_prod
            (eigvecs0[(l * 3 + i) * num_atom * 3 + bi0],
             eigvecs1[(m * 3 + j) * num_atom * 3 + bi1]);
          for (n = 0; n < num_atom; n++) {
            /* 1/sqrt(M_l M_m M_n) mass normalization. */
            mmm = 1.0 / sqrt(mass_lm * masses[n]);
            baseIndex = index_lm + n * 27 + i * 9 + j * 3;
            for (k = 0; k < 3; k++) {
              eig_prod = phonoc_complex_prod
                (eig_prod1, eigvecs2[(n * 3 + k) * num_atom * 3 + bi2]);
              eig_prod = phonoc_complex_prod
                (eig_prod, fc3_reciprocal[baseIndex + k]);
              sum_real += lapack_complex_double_real(eig_prod) * mmm;
              sum_imag += lapack_complex_double_imag(eig_prod) * mmm;
            }
          }
        }
      }
    }
  }
  return lapack_make_complex_double(sum_real, sum_imag);
}
1454.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (j, k) num_threads(4) { /* E := A*B */ #pragma omp for schedule(static, 16) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } /* F := C*D */ #pragma omp for schedule(static, 16) for (i = 0; i < _PB_NJ; i++) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } /* G := E*F */ #pragma omp for schedule(static, 16) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
pi.c
/*
 * Copyright (C) 2010 - 2015 Intel Corporation. All Rights Reserved.
 *
 * The source code contained or described herein and all
 * documents related to the source code ("Material") are owned by
 * Intel Corporation or its suppliers or licensors. Title to the
 * Material remains with Intel Corporation or its suppliers and
 * licensors. The Material is protected by worldwide copyright
 * laws and treaty provisions. No part of the Material may be
 * used, copied, reproduced, modified, published, uploaded,
 * posted, transmitted, distributed, or disclosed in any way
 * except as expressly provided in the license provided with the
 * Materials. No license under any patent, copyright, trade
 * secret or other intellectual property right is granted to or
 * conferred upon you by disclosure or delivery of the Materials,
 * either expressly, by implication, inducement, estoppel or
 * otherwise, except as expressly provided in the license
 * provided with the Materials
 *
 */

// Approximate pi by midpoint-rule numerical integration of f over
// [0, 1].  (f is defined in a separate translation unit; classically
// f(x) = 4 / (1 + x*x), whose integral over [0, 1] equals pi.)
#include <stdio.h>
#include <sys/time.h>

/* Number of integration intervals.  2e9 still fits in a 32-bit int
 * (INT_MAX ~ 2.147e9), so the int loop counter below is safe. */
#define N 2000000000

double f( double x ) ;

/* Wall-clock time in seconds (gettimeofday resolution, ~1 us). */
double clock_it(void)
{
  struct timeval start;
  gettimeofday(&start, NULL);
  return (double)start.tv_sec + (double)start.tv_usec / 1000000.0;
}

/* BUG FIX: `main()` relied on implicit int, which is invalid since
 * C99; declared as `int main(void)` and given an explicit return. */
int main(void)
{
  double sum, pi, x, h;
  double start_time, stop_time;
  int i;

  h = (double)1.0 / (double)N;   /* width of one integration interval */
  sum = 0.0;

  start_time = clock_it();
#ifdef _OPENMP
#pragma omp parallel for private(x) reduction(+:sum)
#endif
  for ( i = 0; i < N; i++ ) {
    /* BUG FIX: with a 0-based loop the interval midpoint is i + 0.5.
     * The original used i - 0.5 (which belongs with a 1-based loop)
     * and so evaluated f at x = -h/2, outside [0, 1]. */
    x = h * (i + 0.5);
    sum = sum + f(x);
  }
  stop_time = clock_it();

  // print value of pi to be sure the integration is correct
  pi = h * sum;
  printf(" pi is approximately : %12.9f \n", pi);

  // print elapsed time
  printf("Elapsed time = %lf seconds\n", (stop_time - start_time));

  return 0;
}
simde-diagnostic.h
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Copyright: * 2017-2020 Evan Nemerson <evan@nemerson.com> */ /* SIMDe targets a very wide range of standards and compilers, and our * goal is to compile cleanly even with extremely aggressive warnings * (i.e., -Weverything in clang, -Wextra in GCC, /W4 for MSVC, etc.) * treated as errors. * * While our preference is to resolve the underlying issue a given * diagnostic is warning us about, sometimes that's not possible. * Fixing a warning in one compiler may cause problems in another. * Sometimes a warning doesn't really apply to us (false positives), * and sometimes adhering to a warning would mean dropping a feature * we *know* the compiler supports since we have tested specifically * for the compiler or feature. * * When practical, warnings are only disabled for specific code. 
 * For
 * a list of warnings which are enabled by default in all SIMDe code,
 * see SIMDE_DISABLE_UNWANTED_DIAGNOSTICS.  Note that we restore the
 * warning stack when SIMDe is done parsing, so code which includes
 * SIMDe is not deprived of these warnings.
 */

#if !defined(SIMDE_DIAGNOSTIC_H)
#define SIMDE_DIAGNOSTIC_H

#include "hedley.h"
#include "simde-detect-clang.h"

/* This is only to help us implement functions like _mm_undefined_ps. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
  #undef SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
#if HEDLEY_HAS_WARNING("-Wuninitialized")
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,2,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("GCC diagnostic ignored \"-Wuninitialized\"")
#elif HEDLEY_PGI_VERSION_CHECK(19,10,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 549")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE,unassigned)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,14,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,SEC_UNINITIALIZED_MEM_READ,SEC_UNDEFINED_RETURN_VALUE)")
#elif HEDLEY_SUNPRO_VERSION_CHECK(5,12,0) && defined(__cplusplus)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("error_messages(off,unassigned)")
#elif \
    HEDLEY_TI_VERSION_CHECK(16,9,9) || \
    HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
    HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
    HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("diag_suppress 551")
#elif HEDLEY_INTEL_VERSION_CHECK(13,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ _Pragma("warning(disable:592)")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) && !defined(__MSVC_RUNTIME_CHECKS)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ __pragma(warning(disable:4700))
#endif

/* GCC emits a lot of "notes" about the ABI being different for things
 * in newer versions of GCC.  We don't really care because all our
 * functions are inlined and don't generate ABI. */
#if HEDLEY_GCC_VERSION_CHECK(7,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_PSABI_ _Pragma("GCC diagnostic ignored \"-Wpsabi\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PSABI_
#endif

/* Since MMX uses x87 FP registers, you're supposed to call _mm_empty()
 * after each MMX function before any floating point instructions.
 * Some compilers warn about functions which use MMX functions but
 * don't call _mm_empty().  However, since SIMDe is implementing the
 * MMX API we shouldn't be calling _mm_empty(); we leave it to the
 * caller to invoke simde_mm_empty(). */
#if HEDLEY_INTEL_VERSION_CHECK(19,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ _Pragma("warning(disable:13200 13203)")
#elif defined(HEDLEY_MSVC_VERSION)
  #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ __pragma(warning(disable:4799))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_
#endif

/* Intel is pushing people to use OpenMP SIMD instead of Cilk+, so they
 * emit a diagnostic if you use #pragma simd instead of
 * #pragma omp simd.  SIMDe supports OpenMP SIMD, you just need to
 * compile with -qopenmp or -qopenmp-simd and define
 * SIMDE_ENABLE_OPENMP.  Cilk+ is just a fallback. */
#if HEDLEY_INTEL_VERSION_CHECK(18,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ _Pragma("warning(disable:3948)")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_
#endif

/* MSVC emits a diagnostic when we call a function (like
 * simde_mm_set_epi32) while initializing a struct.  We currently do
 * this a *lot* in the tests. */
#if \
  defined(HEDLEY_MSVC_VERSION)
  #define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ __pragma(warning(disable:4204))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_
#endif

/* This warning needs a lot of work.  It is triggered if all you do is
 * pass the value to memcpy/__builtin_memcpy, or if you initialize a
 * member of the union, even if that member takes up the entire union.
 * Last tested with clang-10, hopefully things will improve in the
 * future; if clang fixes this I'd love to enable it. */
#if \
  HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
  #define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ _Pragma("clang diagnostic ignored \"-Wconditional-uninitialized\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_
#endif

/* This warning is meant to catch things like `0.3 + 0.4 == 0.7`, which
 * is false.  However, SIMDe uses these operations exclusively
 * for things like _mm_cmpeq_ps, for which we really do want to check
 * for equality (or inequality).
 *
 * If someone wants to put together a SIMDE_FLOAT_EQUAL(a, op, b) macro
 * which just wraps a check in some code to disable this diagnostic I'd
 * be happy to accept it. */
#if \
  HEDLEY_HAS_WARNING("-Wfloat-equal") || \
  HEDLEY_GCC_VERSION_CHECK(3,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_
#endif

/* This is because we use HEDLEY_STATIC_ASSERT for static assertions.
 * If Hedley can't find an implementation it will preprocess to
 * nothing, which means there will be a trailing semi-colon.
*/ #if HEDLEY_HAS_WARNING("-Wextra-semi") #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("clang diagnostic ignored \"-Wextra-semi\"") #elif HEDLEY_GCC_VERSION_CHECK(8,1,0) && defined(__cplusplus) #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ _Pragma("GCC diagnostic ignored \"-Wextra-semi\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ #endif /* We do use a few variadic macros, which technically aren't available * until C99 and C++11, but every compiler I'm aware of has supported * them for much longer. That said, usage is isolated to the test * suite and compilers known to support them. */ #if HEDLEY_HAS_WARNING("-Wvariadic-macros") || HEDLEY_GCC_VERSION_CHECK(4,0,0) #if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ \ _Pragma("clang diagnostic ignored \"-Wvariadic-macros\"") \ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ _Pragma("GCC diagnostic ignored \"-Wvariadic-macros\"") #endif #else #define SIMDE_DIAGNOSTIC_DISABLE_VARIADIC_MACROS_ #endif /* emscripten requires us to use a __wasm_unimplemented_simd128__ macro * before we can access certain SIMD intrinsics, but this diagnostic * warns about it being a reserved name. It is a reserved name, but * it's reserved for the compiler and we are using it to convey * information to the compiler. * * This is also used when enabling native aliases since we don't get to * choose the macro names. */ #if HEDLEY_HAS_WARNING("-Wdouble-promotion") #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ _Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"") #else #define SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ #endif /* clang 3.8 warns about the packed attribute being unnecessary when * used in the _mm_loadu_* functions. That *may* be true for version * 3.8, but for later versions it is crucial in order to make unaligned * access safe. 
 */
#if HEDLEY_HAS_WARNING("-Wpacked")
  #define SIMDE_DIAGNOSTIC_DISABLE_PACKED_ _Pragma("clang diagnostic ignored \"-Wpacked\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PACKED_
#endif

/* Triggered when assigning a float to a double implicitly.  We use
 * explicit casts in SIMDe, this is only used in the test suite. */
#if HEDLEY_HAS_WARNING("-Wdouble-promotion")
  #define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_ _Pragma("clang diagnostic ignored \"-Wdouble-promotion\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_DOUBLE_PROMOTION_
#endif

/* Several compilers treat conformant array parameters as VLAs.  We
 * test to make sure we're in C mode (C++ doesn't support CAPs), and
 * that the version of the standard supports CAPs.  We also reject
 * some buggy compilers like MSVC (the logic is in Hedley if you want
 * to take a look), but with certain warnings enabled some compilers
 * still like to emit a diagnostic. */
#if HEDLEY_HAS_WARNING("-Wvla")
  #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("clang diagnostic ignored \"-Wvla\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_VLA_ _Pragma("GCC diagnostic ignored \"-Wvla\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_VLA_
#endif

#if HEDLEY_HAS_WARNING("-Wused-but-marked-unused")
  #define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ _Pragma("clang diagnostic ignored \"-Wused-but-marked-unused\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_
#endif

#if HEDLEY_HAS_WARNING("-Wunused-function")
  #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ _Pragma("clang diagnostic ignored \"-Wunused-function\"")
#elif HEDLEY_GCC_VERSION_CHECK(3,4,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ _Pragma("GCC diagnostic ignored \"-Wunused-function\"")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */
  #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ __pragma(warning(disable:4505))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_
#endif

#if HEDLEY_HAS_WARNING("-Wpass-failed")
  #define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ _Pragma("clang diagnostic ignored \"-Wpass-failed\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_
#endif

#if HEDLEY_HAS_WARNING("-Wpadded")
  #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ _Pragma("clang diagnostic ignored \"-Wpadded\"")
#elif HEDLEY_MSVC_VERSION_CHECK(19,0,0) /* Likely goes back further */
  #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_ __pragma(warning(disable:4324))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PADDED_
#endif

#if HEDLEY_HAS_WARNING("-Wzero-as-null-pointer-constant")
  #define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_ _Pragma("clang diagnostic ignored \"-Wzero-as-null-pointer-constant\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_ZERO_AS_NULL_POINTER_CONSTANT_
#endif

#if HEDLEY_HAS_WARNING("-Wold-style-cast")
  #define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_OLD_STYLE_CAST_
#endif

#if HEDLEY_HAS_WARNING("-Wcast-function-type") || HEDLEY_GCC_VERSION_CHECK(8,0,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_ _Pragma("GCC diagnostic ignored \"-Wcast-function-type\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CAST_FUNCTION_TYPE_
#endif

/* clang will emit this warning when we use C99 extensions when not in
 * C99 mode, even though it does support this.  In such cases we check
 * the compiler and version first, so we know it's not a problem. */
#if HEDLEY_HAS_WARNING("-Wc99-extensions")
  #define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc99-extensions\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_C99_EXTENSIONS_
#endif

/* https://github.com/simd-everywhere/simde/issues/277 */
/* NOTE: "VARIBALE" is misspelled, but intentionally kept: the name
 * must match its use in SIMDE_DISABLE_UNWANTED_DIAGNOSTICS below. */
#if defined(HEDLEY_GCC_VERSION) && HEDLEY_GCC_VERSION_CHECK(4,6,0) && !HEDLEY_GCC_VERSION_CHECK(6,4,0) && defined(__cplusplus)
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ _Pragma("GCC diagnostic ignored \"-Wunused-but-set-variable\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_
#endif

/* This is the warning that you normally define _CRT_SECURE_NO_WARNINGS
 * to silence, but you have to do that before including anything and
 * that would require reordering includes. */
#if defined(_MSC_VER)
  #define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_ __pragma(warning(disable:4996))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_ANNEX_K_
#endif

/* Some compilers, such as clang, may use `long long` for 64-bit
 * integers, but `long long` triggers a diagnostic with
 * -Wc++98-compat-pedantic which says 'long long' is incompatible with
 * C++98. */
#if HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic")
  #if HEDLEY_HAS_WARNING("-Wc++11-long-long")
    #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \
      _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"") \
      _Pragma("clang diagnostic ignored \"-Wc++11-long-long\"")
  #else
    #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ _Pragma("clang diagnostic ignored \"-Wc++98-compat-pedantic\"")
  #endif
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_
#endif

/* Same problem as above */
#if HEDLEY_HAS_WARNING("-Wc++11-long-long")
  #define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ _Pragma("clang diagnostic ignored \"-Wc++11-long-long\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_
#endif

/* emscripten emits this whenever stdin/stdout/stderr is used in a
 * macro. */
#if HEDLEY_HAS_WARNING("-Wdisabled-macro-expansion")
  #define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_ _Pragma("clang diagnostic ignored \"-Wdisabled-macro-expansion\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_DISABLED_MACRO_EXPANSION_
#endif

/* Clang uses C11 generic selections to implement some AltiVec
 * functions, which triggers this diagnostic when not compiling
 * in C11 mode */
#if HEDLEY_HAS_WARNING("-Wc11-extensions")
  #define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_ _Pragma("clang diagnostic ignored \"-Wc11-extensions\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_
#endif

/* Clang sometimes triggers this warning in macros in the AltiVec and
 * NEON headers, or due to missing functions. */
#if HEDLEY_HAS_WARNING("-Wvector-conversion")
  #define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"")
  /* For NEON, the situation with -Wvector-conversion in clang < 10 is
   * bad enough that we just disable the warning altogether. */
  #if defined(SIMDE_ARCH_ARM) && SIMDE_DETECT_CLANG_VERSION_NOT(10,0,0)
    #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_ SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
  #endif
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
#endif
#if !defined(SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_)
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_
#endif

/* Prior to 5.0, clang didn't support disabling diagnostics in
 * statement exprs.  As a result, some macros we use don't
 * properly silence warnings. */
#if SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual") && HEDLEY_HAS_WARNING("-Wcast-align")
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"") _Pragma("clang diagnostic ignored \"-Wcast-align\"")
#elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-qual")
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
#elif SIMDE_DETECT_CLANG_VERSION_NOT(5,0,0) && HEDLEY_HAS_WARNING("-Wcast-align")
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ _Pragma("clang diagnostic ignored \"-Wcast-align\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_
#endif

/* SLEEF triggers this a *lot* in their headers */
#if HEDLEY_HAS_WARNING("-Wignored-qualifiers")
  #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("clang diagnostic ignored \"-Wignored-qualifiers\"")
#elif HEDLEY_GCC_VERSION_CHECK(4,3,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_ _Pragma("GCC diagnostic ignored \"-Wignored-qualifiers\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_IGNORED_QUALIFIERS_
#endif

/* GCC emits this under some circumstances when using __int128 */
#if HEDLEY_GCC_VERSION_CHECK(4,8,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_ _Pragma("GCC diagnostic ignored \"-Wpedantic\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_PEDANTIC_
#endif

/* MSVC doesn't like (__assume(0), code) and will warn about code being
 * unreachable, but we want it there because not all compilers
 * understand the unreachable macro and will complain if it is missing.
 * I'm planning on adding a new macro to Hedley to handle this a bit
 * more elegantly, but until then... */
#if defined(HEDLEY_MSVC_VERSION)
  #define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_ __pragma(warning(disable:4702))
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_UNREACHABLE_
#endif

/* This is a false positive from GCC in a few places. */
/* NOTE: "UNINITIAZILED" is misspelled, but intentionally kept so the
 * name matches its users. */
#if HEDLEY_GCC_VERSION_CHECK(4,7,0)
  #define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_ _Pragma("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
#else
  #define SIMDE_DIAGNOSTIC_DISABLE_MAYBE_UNINITIAZILED_
#endif

#if defined(SIMDE_ENABLE_NATIVE_ALIASES)
  #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \
    SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_
#else
  #define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_
#endif

/* Master switch: every diagnostic SIMDe suppresses by default. */
#define SIMDE_DISABLE_UNWANTED_DIAGNOSTICS \
  SIMDE_DISABLE_UNWANTED_DIAGNOSTICS_NATIVE_ALIASES_ \
  SIMDE_DIAGNOSTIC_DISABLE_PSABI_ \
  SIMDE_DIAGNOSTIC_DISABLE_NO_EMMS_INSTRUCTION_ \
  SIMDE_DIAGNOSTIC_DISABLE_SIMD_PRAGMA_DEPRECATED_ \
  SIMDE_DIAGNOSTIC_DISABLE_CONDITIONAL_UNINITIALIZED_ \
  SIMDE_DIAGNOSTIC_DISABLE_FLOAT_EQUAL_ \
  SIMDE_DIAGNOSTIC_DISABLE_NON_CONSTANT_AGGREGATE_INITIALIZER_ \
  SIMDE_DIAGNOSTIC_DISABLE_EXTRA_SEMI_ \
  SIMDE_DIAGNOSTIC_DISABLE_VLA_ \
  SIMDE_DIAGNOSTIC_DISABLE_USED_BUT_MARKED_UNUSED_ \
  SIMDE_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION_ \
  SIMDE_DIAGNOSTIC_DISABLE_PASS_FAILED_ \
  SIMDE_DIAGNOSTIC_DISABLE_CPP98_COMPAT_PEDANTIC_ \
  SIMDE_DIAGNOSTIC_DISABLE_CPP11_LONG_LONG_ \
  SIMDE_DIAGNOSTIC_DISABLE_BUGGY_UNUSED_BUT_SET_VARIBALE_ \
  SIMDE_DIAGNOSTIC_DISABLE_BUGGY_CASTS_ \
  SIMDE_DIAGNOSTIC_DISABLE_BUGGY_VECTOR_CONVERSION_

#endif /* !defined(SIMDE_DIAGNOSTIC_H) */
GB_unaryop__abs_uint16_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__abs_uint16_uint8
// op(A') function: GB_tran__abs_uint16_uint8

// C type:   uint16_t
// A type:   uint8_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: for unsigned input, ABS is the identity, so the
// operator reduces to a widening cast from uint8_t to uint16_t
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_uint16_uint8
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent, so a static schedule balances the work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_uint16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template is instantiated with the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matmul_naive.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include <math.h> #include <sys/time.h> //#include <mkl.h> #define N 8192 double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } float A[N][N], B[N][N], C[N][N]; /* Simple "naive" method to multiply two square matrices A and B to generate matrix C. */ //void myMult(int n, float *A, float *B, float *C) { //void myMult(int n, float A[N][N], float B[N][N], float C[N][N]) { void myMult() { //int i, j, k; #pragma omp target map(to:A, B) map(tofrom:C) //#pragma omp target data map(to:A[0:n*n], B[0:n*n], n) map(tofrom:C[0:n*n]) #pragma omp parallel for for(int i = 0; i < N; ++i) for(int k = 0; k < N; ++k) for(int j = 0; j < N; ++j) //C[i*n+j] += A[i*n+k] * B[k*n+j]; C[i][j] = A[i][k] * B[k][j]; } int main(int argc, char *argv[]) { if(argc != 4) { fprintf(stderr, "Use: %s size nThreads nIter\n", argv[0]); return -1; } int i, j, k, nt; //int N = atoi(argv[1]); int nThreads = atoi(argv[2]); int nIter = atoi(argv[3]); omp_set_num_threads(nThreads); memset(A, 0, N * N * sizeof(float)); memset(B, 0, N * N * sizeof(float)); memset(C, 0, N * N * sizeof(float)); //float *A = malloc(sizeof(float)*N*N); //float *B = malloc(sizeof(float)*N*N); //float *C = malloc(sizeof(float)*N*N); //#pragma omp parallel // nt = omp_get_num_threads(); // printf("%s nThreads %d matrix size %d\n", argv[0], nt, N); printf("Initializing input matrices...\n"); //#pragma omp parallel for private(i,j) for(i = 0; i < N; ++i) { for(j = 0; j < N; ++j) { //A[i*N+j] = 1.0f; //B[i*N+j] = 1.0f; //C[i*N+j] = 0.0f; A[i][j] = 1.0f; B[i][j] = 1.0f; C[i][j] = 0.0f; } } printf("warm up run to overcome setup overhead\n"); //myMult(N, A, B, C); myMult(); double aveTime, minTime=1e6, maxTime=0.0f; printf("run the matrix multiplication function %d times\n", nIter); for(i=0; i < nIter; i++) { 
double startTime = rtclock(); //myMult(N, A, B, C); myMult(); double endTime = rtclock(); double runtime = endTime - startTime; maxTime=(maxTime > runtime)?maxTime:runtime; minTime=(minTime < runtime)?minTime:runtime; aveTime += runtime; printf("Iteration %d: runtime %.3f\n", i, runtime); } aveTime /= nIter; printf("maxRT %g minRT %g aveRT %g GFlop/s %g\n", maxTime, minTime, aveTime, 2e-9*N*N*N/aveTime); // free(A); // free(B); // free(C); return 0; }
cvAdvDiff_kry_ompdev.c
/* ------------------------------------------------------------------- * Programmer(s): Shelby Lockhart @ LLNL * ------------------------------------------------------------------- * Acknowledgements: This example is based on cvAdvDiff_kry example * by Slaven Peles which is based on cvAdvDiff_bnd * example by Scott D. Cohen, Alan C. * Hindmarsh and Radu Serban @ LLNL * ------------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ------------------------------------------------------------------- * Example problem: * * The following is a simple example problem with a banded Jacobian, * with the program for its solution by CVODE. * The problem is the semi-discrete form of the advection-diffusion * equation in 2-D: * du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2 * on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time * interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions * are posed, and the initial condition is * u(x,y,t=0) = x(2-x)y(1-y)exp(5xy). * The PDE is discretized on a uniform MX+2 by MY+2 grid with * central differencing, and with boundary values eliminated, * leaving an ODE system of size NEQ = MX*MY. * This program solves the problem with the BDF method, Newton * iteration with the CVBAND band linear solver, and a user-supplied * Jacobian routine. * It uses scalar relative and absolute tolerances. * Output is printed at t = .1, .2, ..., 1. * Run statistics (optional outputs) are printed at the end. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. 
*/ #include <sunlinsol/sunlinsol_spgmr.h> /* access to SPGMR SUNLinearSolver */ #include <sundials/sundials_types.h> /* definition of type realtype */ #include <sundials/sundials_math.h> /* definition of ABS and EXP */ #include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */ #ifdef _OPENMP #include <omp.h> #endif /* Real Constants */ #define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */ #define T0 RCONST(0.0) /* initial time */ #define T1 RCONST(0.1) /* first output time */ #define DTOUT RCONST(0.1) /* output time increment */ #define NOUT 10 /* number of output times */ #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define FIVE RCONST(5.0) /* Type : _UserData (contains model and discretization parameters) */ typedef struct { sunindextype MX, MY, NEQ; realtype dx, dy, XMAX, YMAX; realtype hdcoef, hacoef, vdcoef; } *UserData; /*typedef _UserData *UserData;*/ /* Problem setup and initialization functions */ static UserData SetUserData(int argc, char** argv); static void SetIC(N_Vector u, UserData data); /* Functions Called by the Solver */ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data); static int jtv(N_Vector v, N_Vector Jv, realtype t, N_Vector u, N_Vector fu, void *user_data, N_Vector tmp); /* Private Helper Functions */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data); static void PrintOutput(realtype t, realtype umax, long int nst); static void PrintFinalStats(void *cvode_mem); /* Private function to check function return values */ static int check_retval(void *flagvalue, const char *funcname, int opt); /* *------------------------------- * Main Program *------------------------------- */ int main(int argc, char** argv) { SUNContext sunctx; realtype reltol, abstol, t, tout, umax; N_Vector u; UserData data; SUNLinearSolver LS; void *cvode_mem; int iout, retval; long int nst; u = NULL; data = NULL; LS = NULL; cvode_mem 
= NULL; /* Create the SUNDIALS context */ retval = SUNContext_Create(NULL, &sunctx); if(check_retval(&retval, "SUNContext_Create", 1)) return(1); /* Set model parameters */ data = SetUserData(argc, argv); if(check_retval((void *)data, "malloc", 2)) return(1); reltol = ZERO; /* Set the tolerances */ abstol = ATOL; /* Create an OpenMPDEV vector with initial values */ u = N_VNew_OpenMPDEV(data->NEQ, sunctx); /* Allocate u vector */ if(check_retval((void*)u, "N_VNew_Cuda", 0)) return(1); SetIC(u, data); /* Initialize u vector */ /* Call CVodeCreate to create the solver memory and specify the * Backward Differentiation Formula and the use of a Newton iteration */ cvode_mem = CVodeCreate(CV_BDF, sunctx); if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1); /* Call CVodeInit to initialize the integrator memory and specify the * user's right hand side function in u'=f(t,u), the initial time T0, and * the initial dependent variable vector u. */ retval = CVodeInit(cvode_mem, f, T0, u); if(check_retval(&retval, "CVodeInit", 1)) return(1); /* Call CVodeSStolerances to specify the scalar relative tolerance * and scalar absolute tolerance */ retval = CVodeSStolerances(cvode_mem, reltol, abstol); if (check_retval(&retval, "CVodeSStolerances", 1)) return(1); /* Set the pointer to user-defined data */ retval = CVodeSetUserData(cvode_mem, data); if(check_retval(&retval, "CVodeSetUserData", 1)) return(1); /* Create SPGMR solver without preconditioning * and the maximum Krylov dimension maxl */ LS = SUNLinSol_SPGMR(u, SUN_PREC_NONE, 0, sunctx); if(check_retval(&retval, "SUNLinSol_SPGMR", 1)) return(1); /* Attach the linear solver */ retval = CVodeSetLinearSolver(cvode_mem, LS, NULL); if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1); /* Set the JAcobian-times-vector function */ retval = CVodeSetJacTimes(cvode_mem, NULL, jtv); if(check_retval(&retval, "CVodeSetJacTimesVecFn", 1)) return(1); /* In loop over output points: call CVode, print results, test for 
errors */ umax = N_VMaxNorm(u); PrintHeader(reltol, abstol, umax, data); for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) { retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL); if(check_retval(&retval, "CVode", 1)) break; umax = N_VMaxNorm(u); retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); PrintOutput(t, umax, nst); } PrintFinalStats(cvode_mem); /* Print some final statistics */ N_VDestroy(u); /* Free the u vector */ CVodeFree(&cvode_mem); /* Free the integrator memory */ free(data); /* Free the user data */ SUNContext_Free(&sunctx); return(0); } /* *------------------------------------------- * Problem setup and initialization functions *------------------------------------------- */ /* Set model and discretization parameters */ UserData SetUserData(int argc, char *argv[]) { const sunindextype MX = 10; const sunindextype MY = 5; const realtype XMAX = RCONST(2.0); /* domain boundaries */ const realtype YMAX = RCONST(1.0); /* Allocate user data structure */ UserData ud = (UserData) malloc(sizeof *ud); if(check_retval((void*) ud, "AllocUserData", 2)) return(NULL); ud->MX = MX; ud->MY = MY; ud->NEQ = MX*MY; ud->XMAX = XMAX; ud->YMAX = YMAX; ud->dx = XMAX/(MX+1); /* Set grid coefficients in data */ ud->dy = YMAX/(MY+1); ud->hdcoef = ONE/(ud->dx*ud->dx); ud->hacoef = HALF/(TWO*ud->dx); ud->vdcoef = ONE/(ud->dy*ud->dy); return ud; } /* Set initial conditions in u vector */ static void SetIC(N_Vector u, UserData data) { /* Extract needed constants from data */ const realtype dx = data->dx; const realtype dy = data->dy; const realtype xmax = data->XMAX; const realtype ymax = data->YMAX; const sunindextype MY = data->MY; const sunindextype NEQ = data->NEQ; /* Extract pointer to solution vector data on the host */ realtype *udata = N_VGetHostArrayPointer_OpenMPDEV(u); sunindextype i, j, tid; realtype x, y; /* Load initial profile into u vector */ for (tid=0; tid < NEQ; tid++) { i = tid / MY; j = tid % MY; x = (i+1)*dx; y = 
(j+1)*dy; udata[tid] = x*(xmax - x)*y*(ymax - y)*SUNRexp(FIVE*x*y); } N_VCopyToDevice_OpenMPDEV(u); } /* *------------------------------- * Functions called by the solver *------------------------------- */ /* f routine. Compute f(t,u). */ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data) { realtype uij, udn, uup, ult, urt, hdiff, hadv, vdiff; sunindextype i, j, k; int dev; UserData data = (UserData) user_data; /* Extract needed constants from data */ const sunindextype MX = data->MX; const sunindextype MY = data->MY; const realtype hordc = data->hdcoef; const realtype horac = data->hacoef; const realtype verdc = data->vdcoef; /* Extract pointers to vector data */ const realtype *udata = N_VGetDeviceArrayPointer_OpenMPDEV(u); realtype *dudata = N_VGetDeviceArrayPointer_OpenMPDEV(udot); /* Get device */ dev = omp_get_default_device(); /* Loop over all grid points. */ #pragma omp target map(to:MY,MX,hordc,horac,verdc) is_device_ptr(udata, dudata) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (k=0; k<MY*MX; k++) { i = k/MY; j = k%MY; uij = udata[k]; udn = (j == 0) ? ZERO : udata[k - 1]; uup = (j == MY-1) ? ZERO : udata[k + 1]; ult = (i == 0) ? ZERO : udata[k - MY]; urt = (i == MX-1) ? ZERO : udata[k + MY]; /* Set diffusion and advection terms and load into udot */ hdiff = hordc * (ult - TWO * uij + urt); hadv = horac * (urt - ult); vdiff = verdc * (uup - TWO * uij + udn); dudata[k] = hdiff + hadv + vdiff; } return(0); } /* Jacobian-times-vector routine. 
*/ static int jtv(N_Vector v, N_Vector Jv, realtype t, N_Vector u, N_Vector fu, void *user_data, N_Vector tmp) { sunindextype i, j, k; int dev; UserData data = (UserData) user_data; /* Extract needed constants from data */ const sunindextype MX = data->MX; const sunindextype MY = data->MY; const realtype hordc = data->hdcoef; const realtype horac = data->hacoef; const realtype verdc = data->vdcoef; /* Extract pointers to vector data */ const realtype *vdata = N_VGetDeviceArrayPointer_OpenMPDEV(v); realtype *Jvdata = N_VGetDeviceArrayPointer_OpenMPDEV(Jv); N_VConst(ZERO, Jv); /* Get device */ dev = omp_get_default_device(); #pragma omp target map(to:MX,MY,hordc,horac,verdc) is_device_ptr(vdata,Jvdata) device(dev) #pragma omp teams distribute parallel for schedule(static, 1) for (k=0; k<MX*MY; k++) { i = k/MY; j = k%MY; /* set the kth element of Jv */ Jvdata[k] = -TWO * (verdc + hordc) * vdata[k]; if (i != 0) Jvdata[k] += (hordc - horac) * vdata[k-MY]; if (i != MX-1) Jvdata[k] += (hordc + horac) * vdata[k+MY]; if (j != 0) Jvdata[k] += verdc * vdata[k-1]; if (j != MY-1) Jvdata[k] += verdc * vdata[k+1]; } return(0); } /* *------------------------------- * Private helper functions *------------------------------- */ /* Print first lines of output (problem description) */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data) { printf("\n2-D Advection-Diffusion Equation\n"); printf("Mesh dimensions = %ld X %ld\n", (long int) data->MX, (long int) data->MY); printf("Total system size = %ld\n", (long int) data->NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol); printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #else printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); 
printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #endif return; } /* Print current value */ static void PrintOutput(realtype t, realtype umax, long int nst) { #if defined(SUNDIALS_EXTENDED_PRECISION) printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #else printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #endif return; } /* Get and print some final statistics */ static void PrintFinalStats(void *cvode_mem) { long lenrw, leniw ; long lenrwLS, leniwLS; long int nst, nfe, nsetups, nni, ncfn, netf; long int nli, npe, nps, ncfl, nfeLS; int retval; retval = CVodeGetWorkSpace(cvode_mem, &lenrw, &leniw); check_retval(&retval, "CVodeGetWorkSpace", 1); retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); retval = CVodeGetNumRhsEvals(cvode_mem, &nfe); check_retval(&retval, "CVodeGetNumRhsEvals", 1); retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups); check_retval(&retval, "CVodeGetNumLinSolvSetups", 1); retval = CVodeGetNumErrTestFails(cvode_mem, &netf); check_retval(&retval, "CVodeGetNumErrTestFails", 1); retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni); check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1); retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn); check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1); retval = CVodeGetLinWorkSpace(cvode_mem, &lenrwLS, &leniwLS); check_retval(&retval, "CVodeGetLinWorkSpace", 1); retval = CVodeGetNumLinIters(cvode_mem, &nli); check_retval(&retval, "CVodeGetNumLinIters", 1); retval = CVodeGetNumPrecEvals(cvode_mem, &npe); check_retval(&retval, "CVodeGetNumPrecEvals", 1); retval = CVodeGetNumPrecSolves(cvode_mem, &nps); check_retval(&retval, "CVodeGetNumPrecSolves", 1); retval = CVodeGetNumLinConvFails(cvode_mem, &ncfl); check_retval(&retval, "CVodeGetNumLinConvFails", 1); retval = CVodeGetNumLinRhsEvals(cvode_mem, 
&nfeLS); check_retval(&retval, "CVodeGetNumLinRhsEvals", 1); printf("\nFinal Statistics.. \n\n"); printf("lenrw = %5ld leniw = %5ld\n", lenrw, leniw); printf("lenrwLS = %5ld leniwLS = %5ld\n", lenrwLS, leniwLS); printf("nst = %5ld\n" , nst); printf("nfe = %5ld nfeLS = %5ld\n" , nfe, nfeLS); printf("nni = %5ld nli = %5ld\n" , nni, nli); printf("nsetups = %5ld netf = %5ld\n" , nsetups, netf); printf("npe = %5ld nps = %5ld\n" , npe, nps); printf("ncfn = %5ld ncfl = %5ld\n\n", ncfn, ncfl); return; } /* Check function return value... opt == 0 means SUNDIALS function allocates memory so check if returned NULL pointer opt == 1 means SUNDIALS function returns a retval so check if retval >= 0 opt == 2 means function allocates memory so check if returned NULL pointer */ static int check_retval(void *flagvalue, const char *funcname, int opt) { int *errflag; /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ if (opt == 0 && flagvalue == NULL) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } /* Check if retval < 0 */ else if (opt == 1) { errflag = (int *) flagvalue; if (*errflag < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *errflag); return(1); }} /* Check if function returned NULL pointer - no memory allocated */ else if (opt == 2 && flagvalue == NULL) { fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } return(0); }
GB_binop__islt_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__islt_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__islt_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__islt_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__islt_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__islt_fp32)
// A*D function (colscale):         GB (_AxD__islt_fp32)
// D*A function (rowscale):         GB (_DxB__islt_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__islt_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__islt_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__islt_fp32)
// C=scalar+B                       GB (_bind1st__islt_fp32)
// C=scalar+B'                      GB (_bind1st_tran__islt_fp32)
// C=A+scalar                       GB (_bind2nd__islt_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__islt_fp32)

// C type:   float
// A type:   float
// B,b type: float

// BinaryOp: cij = (aij < bij)
// NOTE: ISLT is the "is less than" operator: the comparison result
// (0 or 1) is stored in C as a float, not as a bool.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_FP32 || GxB_NO_ISLT_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLT is none of these, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above always returns); kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__islt_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for ISLT, so only this branch is compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; GBB(Bb,p) is 1 if entry present
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__islt_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; GBB(Ab,p) is 1 if entry present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (x < aij) ;                       \
}

GrB_Info GB (_bind1st_tran__islt_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (aij < y) ;                       \
}

GrB_Info GB (_bind2nd_tran__islt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
variable_utils.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Ruben Zorrilla
//                   Vicente Mataix Ferrandiz
//
//

#if !defined(KRATOS_VARIABLE_UTILS )
#define KRATOS_VARIABLE_UTILS

/* System includes */

/* External includes */

/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/checks.h"
#include "utilities/parallel_utilities.h"

namespace Kratos
{
///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @class VariableUtils
 * @ingroup KratosCore
 * @brief This class implements a set of auxiliar, already parallelized, methods to
 * perform some common tasks related with the variable values and fixity.
 * @details The methods are exported to python in order to add this improvements to the python interface
 * @author Riccardo Rossi
 * @author Ruben Zorrilla
 * @author Vicente Mataix Ferrandiz
 */
class KRATOS_API(KRATOS_CORE) VariableUtils
{
public:
    ///@name Type Definitions
    ///@{

    /// The node type
    typedef ModelPart::NodeType NodeType;

    /// The condition type
    typedef ModelPart::ConditionType ConditionType;

    /// The element type
    typedef ModelPart::ElementType ElementType;

    /// We create the Pointer related to VariableUtils
    KRATOS_CLASS_POINTER_DEFINITION(VariableUtils);

    /// The nodes container
    typedef ModelPart::NodesContainerType NodesContainerType;

    /// The conditions container
    typedef ModelPart::ConditionsContainerType ConditionsContainerType;

    /// The elements container
    typedef ModelPart::ElementsContainerType ElementsContainerType;

    /// A definition of the double variable
    typedef Variable< double > DoubleVarType;

    /// A definition of the array variable
    typedef Variable< array_1d<double, 3 > > ArrayVarType;

    ///@}
    ///@name Life Cycle
    ///@{

    /** Constructor.
     */

    /** Destructor.
     */

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Copies the nodal value of a variable from an origin model
     * part nodes to the nodes in a destination model part. It is assumed that
     * both origin and destination model parts have the same number of nodes.
     * @param rVariable reference to the variable to get the value from
     * @param rDestinationVariable reference to the variable to be set
     * @param rOriginModelPart origin model part from where the values are retrieved
     * @param rDestinationModelPart destination model part to where the values are copied to
     * @param BuffStep buffer step
     */
    template< class TVarType >
    void CopyModelPartNodalVar(
        const TVarType& rVariable,
        const TVarType& rDestinationVariable,
        const ModelPart& rOriginModelPart,
        ModelPart& rDestinationModelPart,
        const unsigned int BuffStep = 0)
    {
        const int n_orig_nodes = rOriginModelPart.NumberOfNodes();
        const int n_dest_nodes = rDestinationModelPart.NumberOfNodes();

        KRATOS_ERROR_IF_NOT(n_orig_nodes == n_dest_nodes) << "Origin and destination model parts have different number of nodes." << "\n\t- Number of origin nodes: " << n_orig_nodes << "\n\t- Number of destination nodes: " << n_dest_nodes << std::endl;

        // Nodes are matched by position in the containers (i-th origin node to
        // i-th destination node), not by node Id.
        #pragma omp parallel for
        for(int i_node = 0; i_node < n_orig_nodes; ++i_node){
            auto it_dest_node = rDestinationModelPart.NodesBegin() + i_node;
            const auto &it_orig_node = rOriginModelPart.NodesBegin() + i_node;
            const auto &r_value = it_orig_node->GetSolutionStepValue(rVariable, BuffStep);
            it_dest_node->GetSolutionStepValue(rDestinationVariable, BuffStep) = r_value;
        }
    }

    /**
     * @brief Copies the nodal value of a variable from an origin model
     * part nodes to the nodes in a destination model part. It is assumed that
     * both origin and destination model parts have the same number of nodes.
     * @param rVariable reference to the variable to get the value from and to save in
     * @param rOriginModelPart origin model part from where the values are retrieved
     * @param rDestinationModelPart destination model part to where the values are copied to
     * @param BuffStep buffer step
     */
    template< class TVarType >
    void CopyModelPartNodalVar(
        const TVarType& rVariable,
        const ModelPart& rOriginModelPart,
        ModelPart& rDestinationModelPart,
        const unsigned int BuffStep = 0)
    {
        // Same-variable convenience overload: delegates to the two-variable version.
        this->CopyModelPartNodalVar(rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep);
    }

    /**
     * @brief Copies the historical (solution step) nodal value of a variable from
     * the origin model part nodes into the non-historical data value container of
     * the destination model part nodes. Nodes are matched by position in the
     * containers, so both model parts must have the same number of nodes.
     * @param rVariable reference to the historical variable to get the value from
     * @param rDestinationVariable reference to the non-historical variable to be set
     * @param rOriginModelPart origin model part from where the values are retrieved
     * @param rDestinationModelPart destination model part to where the values are copied to
     * @param BuffStep buffer step (applies to the historical read only)
     */
    template< class TVarType >
    void CopyModelPartNodalVarToNonHistoricalVar(
        const TVarType &rVariable,
        const TVarType &rDestinationVariable,
        const ModelPart &rOriginModelPart,
        ModelPart &rDestinationModelPart,
        const unsigned int BuffStep = 0)
    {
        const int n_orig_nodes = rOriginModelPart.NumberOfNodes();
        const int n_dest_nodes = rDestinationModelPart.NumberOfNodes();

        KRATOS_ERROR_IF_NOT(n_orig_nodes == n_dest_nodes) << "Origin and destination model parts have different number of nodes." << "\n\t- Number of origin nodes: " << n_orig_nodes << "\n\t- Number of destination nodes: " << n_dest_nodes << std::endl;

        #pragma omp parallel for
        for(int i_node = 0; i_node < n_orig_nodes; ++i_node){
            auto it_dest_node = rDestinationModelPart.NodesBegin() + i_node;
            const auto &it_orig_node = rOriginModelPart.NodesBegin() + i_node;
            // read from the historical database, write to the non-historical one
            const auto &r_value = it_orig_node->GetSolutionStepValue(rVariable, BuffStep);
            it_dest_node->GetValue(rDestinationVariable) = r_value;
        }
    }

    /**
     * @brief Same-variable overload: copies the historical nodal value of rVariable
     * into the destination nodes' non-historical container under the same variable.
     */
    template< class TVarType >
    void CopyModelPartNodalVarToNonHistoricalVar(
        const TVarType &rVariable,
        const ModelPart &rOriginModelPart,
        ModelPart &rDestinationModelPart,
        const unsigned int BuffStep = 0)
    {
        this->CopyModelPartNodalVarToNonHistoricalVar(rVariable, rVariable, rOriginModelPart, rDestinationModelPart, BuffStep);
    }

    /**
     * @brief Copies the historical nodal value of rOriginVariable to the historical
     * rDestinationVariable on destination nodes whose flag matches, with independent
     * read/write buffer steps. Guards against a no-op self copy (same model part,
     * variable and buffer step).
     */
    template <class TDataType>
    void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar(
        const Variable<TDataType>& rOriginVariable,
        const Variable<TDataType>& rDestinationVariable,
        const ModelPart& rOriginModelPart,
        ModelPart& rDestinationModelPart,
        const Flags& rFlag,
        const bool CheckValue = true,
        const unsigned int ReadBufferStep = 0,
        const unsigned int WriteBufferStep = 0)
    {
        KRATOS_TRY

        KRATOS_ERROR_IF(
            rOriginModelPart.FullName() == rDestinationModelPart.FullName() &&
            rOriginVariable == rDestinationVariable &&
            ReadBufferStep == WriteBufferStep)
            << "Trying to copy flagged nodal solution step values with the same origin and destination model parts/variables/buffer steps.
This is not permitted ( Origin model part: " << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name() << ", variable: " << rOriginVariable.Name() << ", buffer step: " << ReadBufferStep << " ) !"; KRATOS_ERROR_IF_NOT(rOriginModelPart.HasNodalSolutionStepVariable(rOriginVariable)) << rOriginVariable.Name() << " is not found in nodal solution step variables list in origin model part ( " << rOriginModelPart.Name() << " )."; KRATOS_ERROR_IF_NOT(rDestinationModelPart.HasNodalSolutionStepVariable(rDestinationVariable)) << rDestinationVariable.Name() << " is not found in nodal solution step variables list in destination model part ( " << rDestinationModelPart.Name() << " )."; KRATOS_ERROR_IF(ReadBufferStep >= rOriginModelPart.GetBufferSize()) << "Origin model part ( " << rOriginModelPart.Name() << " ) buffer size is smaller or equal than read buffer size [ " << rOriginModelPart.GetBufferSize() << " <= " << ReadBufferStep << " ]."; KRATOS_ERROR_IF(WriteBufferStep >= rDestinationModelPart.GetBufferSize()) << "Destination model part ( " << rDestinationModelPart.Name() << " ) buffer size is smaller or equal than read buffer size [ " << rDestinationModelPart.GetBufferSize() << " <= " << WriteBufferStep << " ]."; CopyModelPartFlaggedVariable<NodesContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](NodeType& rDestNode, const TDataType& rValue) { rDestNode.FastGetSolutionStepValue( rDestinationVariable, WriteBufferStep) = rValue; }, [&](const NodeType& rOriginNode) -> const TDataType& { return rOriginNode.FastGetSolutionStepValue(rOriginVariable, ReadBufferStep); }); rDestinationModelPart.GetCommunicator().SynchronizeVariable(rDestinationVariable); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, 
const unsigned int ReadBufferStep = 0, const unsigned int WriteBufferStep = 0) { KRATOS_TRY CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue, ReadBufferStep, WriteBufferStep); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0, const unsigned int WriteBufferStep = 0) { KRATOS_TRY CopyModelPartFlaggedNodalHistoricalVarToHistoricalVar( rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, ReadBufferStep, WriteBufferStep); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0) { KRATOS_TRY KRATOS_ERROR_IF_NOT(rOriginModelPart.HasNodalSolutionStepVariable(rOriginVariable)) << rOriginVariable.Name() << " is not found in nodal solution step variables list in origin model part ( " << rOriginModelPart.Name() << " )."; KRATOS_ERROR_IF(ReadBufferStep >= rOriginModelPart.GetBufferSize()) << "Origin model part ( " << rOriginModelPart.Name() << " ) buffer size is smaller or equal than read buffer size [ " << rOriginModelPart.GetBufferSize() << " <= " << ReadBufferStep << " ]."; CopyModelPartFlaggedVariable<NodesContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](NodeType& rDestNode, const TDataType& rValue) { rDestNode.SetValue(rDestinationVariable, rValue); }, [&](const NodeType& rOriginNode) -> const TDataType& { return rOriginNode.FastGetSolutionStepValue(rOriginVariable, ReadBufferStep); 
}); rDestinationModelPart.GetCommunicator().SynchronizeNonHistoricalVariable(rDestinationVariable); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0) { CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue, ReadBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0) { CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, ReadBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int ReadBufferStep = 0) { CopyModelPartFlaggedNodalHistoricalVarToNonHistoricalVar( rVariable, rVariable, rModelPart, rModelPart, rFlag, CheckValue, ReadBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int WriteBufferStep = 0) { KRATOS_TRY KRATOS_ERROR_IF_NOT(rDestinationModelPart.HasNodalSolutionStepVariable(rDestinationVariable)) << rDestinationVariable.Name() << " is not found in nodal solution step variables list in destination model part ( " << 
rDestinationModelPart.Name() << " )."; KRATOS_ERROR_IF(WriteBufferStep >= rDestinationModelPart.GetBufferSize()) << "Destination model part ( " << rDestinationModelPart.Name() << " ) buffer size is smaller or equal than read buffer size [ " << rDestinationModelPart.GetBufferSize() << " <= " << WriteBufferStep << " ]."; CopyModelPartFlaggedVariable<NodesContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](NodeType& rDestNode, const TDataType& rValue) { rDestNode.FastGetSolutionStepValue( rDestinationVariable, WriteBufferStep) = rValue; }, [&](const NodeType& rOriginNode) -> const TDataType& { return rOriginNode.GetValue(rOriginVariable); }); rDestinationModelPart.GetCommunicator().SynchronizeVariable(rDestinationVariable); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int WriteBufferStep = 0) { CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue, WriteBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int WriteBufferStep = 0) { CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, WriteBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( const Variable<TDataType>& rVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true, const unsigned int WriteBufferStep = 0) { CopyModelPartFlaggedNodalNonHistoricalVarToHistoricalVar( rVariable, 
rVariable, rModelPart, rModelPart, rFlag, CheckValue, WriteBufferStep); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true) { KRATOS_TRY KRATOS_ERROR_IF( rOriginModelPart.FullName() == rDestinationModelPart.FullName() && rOriginVariable == rDestinationVariable ) << "Trying to copy flagged nodal non-historical values with the same model parts/variables. This is not permitted ( Origin model part: " << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name() << ", variable: " << rOriginVariable.Name() << " ) !"; CopyModelPartFlaggedVariable<NodesContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](NodeType& rDestNode, const TDataType& rValue) { rDestNode.SetValue(rDestinationVariable, rValue); }, [&](const NodeType& rOriginNode) -> const TDataType& { return rOriginNode.GetValue(rOriginVariable); }); rDestinationModelPart.GetCommunicator().SynchronizeNonHistoricalVariable(rDestinationVariable); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true) { CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue); } template <class TDataType> void CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true) { CopyModelPartFlaggedNodalNonHistoricalVarToNonHistoricalVar( rVariable, rVariable, rOriginModelPart, 
rDestinationModelPart, rFlag, CheckValue); } template <class TDataType> void CopyModelPartFlaggedElementVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true) { KRATOS_TRY KRATOS_ERROR_IF(rOriginModelPart.FullName() == rDestinationModelPart.FullName() && rOriginVariable == rDestinationVariable) << "Trying to copy flagged elemental variable data with the same model " "parts/variables. This is not permitted ( Origin model part: " << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name() << ", variable: " << rOriginVariable.Name() << " ) !"; CopyModelPartFlaggedVariable<ElementsContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](ElementType& rDestElement, const TDataType& rValue) { rDestElement.SetValue(rDestinationVariable, rValue); }, [&](const ElementType& rOriginElement) -> const TDataType& { return rOriginElement.GetValue(rOriginVariable); }); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedElementVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true) { CopyModelPartFlaggedElementVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue); } template <class TDataType> void CopyModelPartFlaggedElementVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true) { CopyModelPartFlaggedElementVar( rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue); } template <class TDataType> void CopyModelPartFlaggedConditionVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, const ModelPart& rOriginModelPart, ModelPart& 
rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true) { KRATOS_TRY KRATOS_ERROR_IF(rOriginModelPart.FullName() == rDestinationModelPart.FullName() && rOriginVariable == rDestinationVariable) << "Trying to copy flagged condition variable data with the same model " "parts/variables. This is not permitted ( Origin model part: " << rOriginModelPart.Name() << ", destination model part: " << rDestinationModelPart.Name() << ", variable: " << rOriginVariable.Name() << " ) !"; CopyModelPartFlaggedVariable<ConditionsContainerType>( rOriginModelPart, rDestinationModelPart, rFlag, CheckValue, [&](ConditionType& rDestCondition, const TDataType& rValue) { rDestCondition.SetValue(rDestinationVariable, rValue); }, [&](const ConditionType& rOriginCondition) -> const TDataType& { return rOriginCondition.GetValue(rOriginVariable); }); KRATOS_CATCH(""); } template <class TDataType> void CopyModelPartFlaggedConditionVar( const Variable<TDataType>& rOriginVariable, const Variable<TDataType>& rDestinationVariable, ModelPart& rModelPart, const Flags& rFlag, const bool CheckValue = true) { CopyModelPartFlaggedConditionVar( rOriginVariable, rDestinationVariable, rModelPart, rModelPart, rFlag, CheckValue); } template <class TDataType> void CopyModelPartFlaggedConditionVar( const Variable<TDataType>& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue = true) { CopyModelPartFlaggedConditionVar( rVariable, rVariable, rOriginModelPart, rDestinationModelPart, rFlag, CheckValue); } /** * @brief Copies the elemental value of a variable from an origin model * part elements to the elements in a destination model part. It is assumed that * both origin and destination model parts have the same number of elements. 
* @param rVariable reference to the variable to be set * @param rOriginModelPart origin model part from where the values are retrieved * @param rDestinationModelPart destination model part to where the values are copied to * @param BuffStep buffer step */ template< class TVarType > void CopyModelPartElementalVar( const TVarType& rVariable, const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart){ const int n_orig_elems = rOriginModelPart.NumberOfElements(); const int n_dest_elems = rDestinationModelPart.NumberOfElements(); KRATOS_ERROR_IF_NOT(n_orig_elems == n_dest_elems) << "Origin and destination model parts have different number of elements." << "\n\t- Number of origin elements: " << n_orig_elems << "\n\t- Number of destination elements: " << n_dest_elems << std::endl; #pragma omp parallel for for(int i_elems = 0; i_elems < n_orig_elems; ++i_elems){ auto it_dest_elems = rDestinationModelPart.ElementsBegin() + i_elems; const auto &it_orig_elems = rOriginModelPart.ElementsBegin() + i_elems; const auto &r_value = it_orig_elems->GetValue(rVariable); it_dest_elems->SetValue(rVariable,r_value); } } /** * @brief Sets the nodal value of a scalar variable * @param rVariable reference to the scalar variable to be set * @param Value Value to be set * @param rNodes reference to the objective node set */ template <class TVarType> KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable") void SetScalarVar( const TVarType &rVariable, const double Value, NodesContainerType &rNodes) { KRATOS_TRY #pragma omp parallel for for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) { NodesContainerType::iterator it_node = rNodes.begin() + k; it_node->FastGetSolutionStepValue(rVariable) = Value; } KRATOS_CATCH("") } /** * @brief Sets the nodal value of a scalar variable (considering flag) * @param rVariable reference to the scalar variable to be set * @param Value Value to be set * @param rNodes reference to the objective node set * @param Flag The flag to be 
considered in the assignation * @param Check What is checked from the flag */ template< class TVarType > KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable") void SetScalarVarForFlag( const TVarType& rVariable, const double Value, NodesContainerType& rNodes, const Flags Flag, const bool Check = true ) { KRATOS_TRY #pragma omp parallel for for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) { NodesContainerType::iterator it_node = rNodes.begin() + k; if (it_node->Is(Flag) == Check) it_node->FastGetSolutionStepValue(rVariable) = Value; } KRATOS_CATCH("") } /** * @brief Sets the nodal value of a vector variable * @param rVariable reference to the vector variable to be set * @param Value array containing the Value to be set * @param rNodes reference to the objective node set */ KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable") void SetVectorVar( const ArrayVarType& rVariable, const array_1d<double, 3 >& Value, NodesContainerType& rNodes ); /** * @brief Sets the nodal value of a vector variable (considering flag) * @param rVariable reference to the vector variable to be set * @param Value array containing the Value to be set * @param rNodes reference to the objective node set * @param Flag The flag to be considered in the assignation * @param Check What is checked from the flag */ KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetVariable") void SetVectorVarForFlag( const ArrayVarType& rVariable, const array_1d<double, 3 >& Value, NodesContainerType& rNodes, const Flags Flag, const bool Check = true ); /** * @brief Sets the nodal value of a scalar variable * @tparam TDataType Variable data type * @tparam Variable<TDataType> Variable type * @param rVariable reference to the scalar variable to be set * @param Value Value to be set * @param rNodes reference to the objective node set */ template<class TDataType, class TVarType = Variable<TDataType> > void SetVariable( const TVarType& rVariable, const TDataType& rValue, 
NodesContainerType& rNodes ) { KRATOS_TRY #pragma omp parallel for for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) { NodesContainerType::iterator it_node = rNodes.begin() + k; it_node->FastGetSolutionStepValue(rVariable) = rValue; } KRATOS_CATCH("") } /** * @brief Sets the nodal value of a scalar variable (considering flag) * @tparam TDataType Variable data type * @tparam Variable<TDataType> Variable type * @param rVariable reference to the scalar variable to be set * @param rValue Value to be set * @param rNodes reference to the objective node set * @param Flag The flag to be considered in the assignation * @param Check What is checked from the flag */ template <class TDataType, class TVarType = Variable<TDataType>> void SetVariable( const TVarType &rVariable, const TDataType &rValue, NodesContainerType &rNodes, const Flags Flag, const bool CheckValue = true) { KRATOS_TRY #pragma omp parallel for for (int k = 0; k < static_cast<int>(rNodes.size()); ++k) { auto it_node = rNodes.begin() + k; if (it_node->Is(Flag) == CheckValue) { it_node->FastGetSolutionStepValue(rVariable) = rValue; } } KRATOS_CATCH("") } /** * @brief Sets the nodal value of any variable to zero * @param rVariable reference to the scalar variable to be set * @param rNodes reference to the objective node set */ template< class TType , class TContainerType> void SetNonHistoricalVariableToZero( const Variable< TType >& rVariable, TContainerType& rContainer) { KRATOS_TRY this->SetNonHistoricalVariable(rVariable, rVariable.Zero(), rContainer); KRATOS_CATCH("") } /** * @brief Sets the nodal value of any variable to zero * @param rVariable reference to the scalar variable to be set * @param rNodes reference to the objective node set */ template< class TType > void SetHistoricalVariableToZero( const Variable< TType >& rVariable, NodesContainerType& rNodes) { KRATOS_TRY this->SetVariable(rVariable, rVariable.Zero(), rNodes); KRATOS_CATCH("") } /** * @brief Sets the nodal value of a scalar variable 
non historical * @param rVariable reference to the scalar variable to be set * @param Value Value to be set * @param rNodes reference to the objective node set */ template< class TVarType > KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable") void SetNonHistoricalScalarVar( const TVarType& rVariable, const double Value, NodesContainerType& rNodes ) { KRATOS_TRY #pragma omp parallel for for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) { NodesContainerType::iterator it_node = rNodes.begin() + k; it_node->SetValue(rVariable, Value); } KRATOS_CATCH("") } /** * @brief Sets the nodal value of a vector non historical variable * @param rVariable reference to the vector variable to be set * @param Value array containing the Value to be set * @param rNodes reference to the objective node set */ KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SetNonHistoricalVariable") void SetNonHistoricalVectorVar( const ArrayVarType& rVariable, const array_1d<double, 3 >& Value, NodesContainerType& rNodes ); /** * @brief Sets the container value of any type of non historical variable * @param rVariable reference to the scalar variable to be set * @param Value Value to be set * @param rContainer Reference to the objective container */ template< class TType, class TContainerType, class TVarType = Variable< TType >> void SetNonHistoricalVariable( const TVarType& rVariable, const TType& Value, TContainerType& rContainer ) { KRATOS_TRY #pragma omp parallel for for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) { auto it_cont = rContainer.begin() + k; it_cont->SetValue(rVariable, Value); } KRATOS_CATCH("") } /** * @brief Sets the container value of any type of non historical variable (considering flag) * @param rVariable reference to the scalar variable to be set * @param Value Value to be set * @param rContainer Reference to the objective container * @param Flag The flag to be considered in the assignation * @param Check What is 
checked from the flag */ template< class TType, class TContainerType, class TVarType = Variable< TType >> void SetNonHistoricalVariable( const TVarType& rVariable, const TType& rValue, TContainerType& rContainer, const Flags Flag, const bool Check = true ) { KRATOS_TRY #pragma omp parallel for for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) { auto it_cont = rContainer.begin() + k; if (it_cont->Is(Flag) == Check) { it_cont->SetValue(rVariable, rValue); } } KRATOS_CATCH("") } /** * @brief Clears the container data value container * @param rContainer Reference to the objective container */ template< class TContainerType> void ClearNonHistoricalData(TContainerType& rContainer) { KRATOS_TRY const auto it_cont_begin = rContainer.begin(); #pragma omp parallel for for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) { auto it_cont = it_cont_begin + k; it_cont->Data().Clear(); } KRATOS_CATCH("") } /** * @brief Distributes variable values in TContainerType container to nodes * * This method distributes variables values stored in TContainerType data value container in rModelPart * to nodes. Constant weighting is used for each node based on rWeightVariable value. The result * is stored in nodal non-historical data value container under the same rVariable. If IsInverseWeightProvided * is true, then the weights provided by rWeightVariable is inverted to get nodal weight. Otherwise, the value * given by rWeightVariable is used as weight. * * * @tparam TDataType Data type * @tparam TContainerType ContainerType of model part * @tparam TWeightDataType Data type of weight variable (this should be either int or double) * @param rModelPart Model part * @param rVariable Variable to be distributed * @param rWeightVariable Variable which holds weight to distribute entity values to nodes * @param IsInverseWeightProvided Whether the weight is provided as inverse or not. 
*/ template <class TDataType, class TContainerType, class TWeightDataType> void WeightedAccumulateVariableOnNodes( ModelPart& rModelPart, const Variable<TDataType>& rVariable, const Variable<TWeightDataType>& rWeightVariable, const bool IsInverseWeightProvided = false); /** * @brief Sets a flag according to a given status over a given container * @param rFlag flag to be set * @param rFlagValue flag value to be set * @param rContainer Reference to the objective container */ template< class TContainerType > void SetFlag( const Flags& rFlag, const bool& rFlagValue, TContainerType& rContainer ) { KRATOS_TRY const auto it_cont_begin = rContainer.begin(); #pragma omp parallel for for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) { auto it_cont = it_cont_begin + k; it_cont->Set(rFlag, rFlagValue); } KRATOS_CATCH("") } /** * @brief Flips a flag over a given container * @param rFlag flag to be set * @param rContainer Reference to the objective container */ template< class TContainerType > void ResetFlag( const Flags& rFlag, TContainerType& rContainer ) { KRATOS_TRY const auto it_cont_begin = rContainer.begin(); #pragma omp parallel for for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) { auto it_cont = it_cont_begin + k; it_cont->Reset(rFlag); } KRATOS_CATCH("") } /** * @brief Flips a flag over a given container * @param rFlag flag to be set * @param rContainer Reference to the objective container */ template< class TContainerType > void FlipFlag( const Flags& rFlag, TContainerType& rContainer ) { KRATOS_TRY const auto it_cont_begin = rContainer.begin(); #pragma omp parallel for for (int k = 0; k< static_cast<int> (rContainer.size()); ++k) { auto it_cont = it_cont_begin + k; it_cont->Flip(rFlag); } KRATOS_CATCH("") } /** * @brief Takes the value of a non-historical vector variable and sets it in other variable * @param OriginVariable reference to the origin vector variable * @param SavedVariable reference to the destination vector variable * @param 
rNodes reference to the objective node set */ KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable") void SaveVectorVar( const ArrayVarType& OriginVariable, const ArrayVarType& SavedVariable, NodesContainerType& rNodes ); /** * @brief Takes the value of a non-historical scalar variable and sets it in other variable * @param OriginVariable reference to the origin scalar variable * @param SavedVariable reference to the destination scalar variable * @param rNodes reference to the objective node set */ KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveVariable") void SaveScalarVar( const DoubleVarType& OriginVariable, const DoubleVarType& SavedVariable, NodesContainerType& rNodes ); /** * @brief Takes the value of a non-historical variable and saves it in another variable * For a nodal container, this takes the value of a non-historical variable and saves it in another one * @tparam TDataType The variable data type * @tparam Variable<TDataType> The variable type * @param rOriginVariable Reference to the origin variable * @param rSavedVariable Reference to the destination variable * @param rNodesContainer Reference to the nodal container */ template< class TDataType, class TVariableType = Variable<TDataType> > void SaveVariable( const TVariableType &rOriginVariable, const TVariableType &rSavedVariable, NodesContainerType &rNodesContainer) { KRATOS_TRY #pragma omp parallel for for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) { auto it_node = rNodesContainer.begin() + i_node; it_node->SetValue(rSavedVariable, it_node->FastGetSolutionStepValue(rOriginVariable)); } KRATOS_CATCH("") } /** * @brief Takes the value of a non-historical vector variable and sets it in other non-historical variable * @param OriginVariable reference to the origin vector variable * @param SavedVariable reference to the destination vector variable * @param rNodes reference to the objective node set */ KRATOS_DEPRECATED_MESSAGE("Method 
deprecated, please use SaveNonHistoricalVariable") void SaveVectorNonHistoricalVar( const ArrayVarType& OriginVariable, const ArrayVarType& SavedVariable, NodesContainerType& rNodes ); /** * @brief Takes the value of a non-historical scalar variable and sets it in other non-historical variable * @param OriginVariable reference to the origin scalar variable * @param SavedVariable reference to the destination scalar variable * @param rNodes reference to the objective node set */ KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use SaveNonHistoricalVariable") void SaveScalarNonHistoricalVar( const DoubleVarType& OriginVariable, const DoubleVarType& SavedVariable, NodesContainerType& rNodes ); /** * @brief Takes the value of a non-historical variable and saves it in another historical variable * For a non-nodal container, this method takes the value of an origin variable and saves it in a destination one * @tparam TDataType The variable data type * @tparam TContainerType The container type * @tparam Variable<TDataType> The variable type * @param rOriginVariable Reference to the origin variable * @param rSavedVariable Reference to the destination variable * @param rContainer Reference to the container of interest */ template< class TDataType, class TContainerType, class TVariableType = Variable<TDataType> > void SaveNonHistoricalVariable( const TVariableType &rOriginVariable, const TVariableType &rSavedVariable, TContainerType &rContainer ) { KRATOS_TRY #pragma omp parallel for for (int i = 0; i < static_cast<int>(rContainer.size()); ++i) { auto it_cont = rContainer.begin() + i; it_cont->SetValue(rSavedVariable, it_cont->GetValue(rOriginVariable)); } KRATOS_CATCH("") } /** * @brief Takes the value of an historical vector variable and sets it in other variable * @param OriginVariable reference to the origin vector variable * @param DestinationVariable reference to the destination vector variable * @param rNodes reference to the objective node set */ 
KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable") void CopyVectorVar( const ArrayVarType& OriginVariable, const ArrayVarType& DestinationVariable, NodesContainerType& rNodes ); /** * @brief Takes the value of an historical double variable and sets it in other variable * @param OriginVariable reference to the origin double variable * @param DestinationVariable reference to the destination double variable * @param rNodes reference to the objective node set */ KRATOS_DEPRECATED_MESSAGE("Method deprecated, please use CopyVariable") void CopyScalarVar( const DoubleVarType &OriginVariable, const DoubleVarType &DestinationVariable, NodesContainerType &rNodes); /** * @brief Takes the value of an historical variable and sets it in another variable * This function takes the value of an historical variable and sets in another * variable in all the nodes of the provided container. * @tparam TDataType The variable data type * @tparam Variable<TDataType> The variable type * @param rOriginVariable Reference to the origin variable * @param rDestinationVariable Reference to the destination variable * @param rNodesContainer Reference to the nodes container */ template< class TDataType, class TVariableType = Variable<TDataType> > void CopyVariable( const TVariableType &rOriginVariable, const TVariableType &rDestinationVariable, NodesContainerType &rNodesContainer) { KRATOS_TRY #pragma omp parallel for for (int i_node = 0; i_node < static_cast<int>(rNodesContainer.size()); ++i_node) { auto it_node = rNodesContainer.begin() + i_node; it_node->FastGetSolutionStepValue(rDestinationVariable) = it_node->FastGetSolutionStepValue(rOriginVariable); } KRATOS_CATCH("") } /** * @brief Returns a list of nodes filtered using the given double variable and value * @param Variable reference to the double variable to be filtered * @param Value Filtering Value * @param rOriginNodes Reference to the objective node set * @return selected_nodes: List of filtered nodes */ 
NodesContainerType SelectNodeList( const DoubleVarType& Variable, const double Value, const NodesContainerType& rOriginNodes ); /** * @brief Checks if all the nodes of a node set has the specified variable * @param rVariable reference to a variable to be checked * @param rNodes reference to the nodes set to be checked * @return 0: if succeeds, return 0 */ template<class TVarType> int CheckVariableExists( const TVarType& rVariable, const NodesContainerType& rNodes ) { KRATOS_TRY for (auto& i_node : rNodes) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(rVariable, i_node); return 0; KRATOS_CATCH(""); } /** * @brief Fixes or frees a variable for all of the nodes in the list. The dof has to exist. * @param rVar reference to the variable to be fixed or freed * @param IsFixed if true fixes, if false frees * @param rNodes reference to the nodes set to be frixed or freed */ template< class TVarType > void ApplyFixity( const TVarType& rVar, const bool IsFixed, NodesContainerType& rNodes ) { KRATOS_TRY if (rNodes.size() != 0) { // checking the first node to avoid error being thrown in parallel region KRATOS_ERROR_IF_NOT(rNodes.begin()->HasDofFor(rVar)) << "Trying to fix/free dof of variable " << rVar.Name() << " but this dof does not exist in node #" << rNodes.begin()->Id() << "!" << std::endl; #ifdef KRATOS_DEBUG for (const auto& r_node : rNodes) { KRATOS_ERROR_IF_NOT(r_node.HasDofFor(rVar)) << "Trying to fix/free dof of variable " << rVar.Name() << " but this dof does not exist in node #" << r_node.Id() << "!" 
<< std::endl; } #endif CheckVariableExists(rVar, rNodes); if (IsFixed) { #pragma omp parallel for for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) { NodesContainerType::iterator it_node = rNodes.begin() + k; it_node->pGetDof(rVar)->FixDof(); } } else { #pragma omp parallel for for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) { NodesContainerType::iterator it_node = rNodes.begin() + k; it_node->pGetDof(rVar)->FreeDof(); } } } KRATOS_CATCH("") } /** * @brief Fixes/Frees dofs based on a flag * * This method fixes/frees given rVariable, if rFlag matches CheckValue provided for that * specific node. * * @tparam TVarType Variable type * @param rVariable Variable to be fixed or freed * @param IsFixed True to fix variable, false to free variable * @param rNodes Nodes container * @param rFlag Flag to be checked to fix or free * @param CheckValue Flag value which is checked against */ template< class TVarType > void ApplyFixity( const TVarType& rVariable, const bool IsFixed, NodesContainerType& rNodes, const Flags& rFlag, const bool CheckValue = true) { KRATOS_TRY if (rNodes.size() != 0) { // checking the first node to avoid error being thrown in parallel region KRATOS_ERROR_IF_NOT(rNodes.begin()->HasDofFor(rVariable)) << "Trying to fix/free dof of variable " << rVariable.Name() << " but this dof does not exist in node #" << rNodes.begin()->Id() << "!" << std::endl; #ifdef KRATOS_DEBUG for (const auto& r_node : rNodes) { KRATOS_ERROR_IF_NOT(r_node.HasDofFor(rVariable)) << "Trying to fix/free dof of variable " << rVariable.Name() << " but this dof does not exist in node #" << r_node.Id() << "!" 
<< std::endl; } #endif CheckVariableExists(rVariable, rNodes); if (IsFixed) { BlockPartition<NodesContainerType>(rNodes).for_each( [&rVariable, &rFlag, CheckValue](NodeType& rNode) { if (rNode.Is(rFlag) == CheckValue) { rNode.pGetDof(rVariable)->FixDof(); } }); } else { BlockPartition<NodesContainerType>(rNodes).for_each( [&rVariable, &rFlag, CheckValue](NodeType& rNode) { if (rNode.Is(rFlag) == CheckValue) { rNode.pGetDof(rVariable)->FreeDof(); } }); } } KRATOS_CATCH(""); } /** * @brief Loops along a vector data to set its values to the nodes contained in a node set. * @note This function is suitable for scalar historical variables, since each * one of the values in the data vector is set to its correspondent node. Besides, * the values must be sorted as the nodes are (value i corresponds to node i). * @param rVar reference to the variable to be fixed or freed * @param rData rData vector. Note that its lenght must equal the number of nodes * @param rNodes reference to the nodes set to be set */ template< class TVarType > void ApplyVector( const TVarType& rVar, const Vector& rData, NodesContainerType& rNodes ) { KRATOS_TRY if(rNodes.size() != 0 && rNodes.size() == rData.size()) { // First we do a check CheckVariableExists(rVar, rNodes); #pragma omp parallel for for (int k = 0; k< static_cast<int> (rNodes.size()); ++k) { NodesContainerType::iterator it_node = rNodes.begin() + k; it_node->FastGetSolutionStepValue(rVar) = rData[k]; } } else KRATOS_ERROR << "There is a mismatch between the size of data array and the number of nodes "; KRATOS_CATCH("") } /** * @brief Returns the nodal value summation of a non-historical vector variable. 
* @param rVar reference to the vector variable to summed * @param rModelPart reference to the model part that contains the objective node set * @return sum_value: summation vector result */ array_1d<double, 3> SumNonHistoricalNodeVectorVariable( const ArrayVarType& rVar, const ModelPart& rModelPart ); /** * @brief Returns the nodal value summation of a non-historical scalar variable. * @param rVar reference to the scalar variable to be summed * @param rModelPart reference to the model part that contains the objective node set * @return sum_value: summation result */ template< class TVarType > double SumNonHistoricalNodeScalarVariable( const TVarType& rVar, const ModelPart& rModelPart ) { KRATOS_TRY double sum_value = 0.0; // Getting info const auto& r_communicator = rModelPart.GetCommunicator(); const auto& r_local_mesh = r_communicator.LocalMesh(); const auto& r_nodes_array = r_local_mesh.Nodes(); const auto it_node_begin = r_nodes_array.begin(); #pragma omp parallel for reduction(+:sum_value) for (int k = 0; k < static_cast<int>(r_nodes_array.size()); ++k) { const auto it_node = it_node_begin + k; sum_value += it_node->GetValue(rVar); } return r_communicator.GetDataCommunicator().SumAll(sum_value); KRATOS_CATCH("") } /** * @brief This method accumulates and return a variable value * For a nodal historical variable, this method accumulates and * returns the summation in a model part. 
* @tparam TDataType Variable datatype * @tparam Variable<TDataType> Variable type * @param rVariable Nodal historical variable to be accumulated * @param rModelPart Model part in where the summation is done * @param BuffStep Buffer position * @return TDataType Value of the summation */ template< class TDataType, class TVarType = Variable<TDataType> > TDataType SumHistoricalVariable( const TVarType &rVariable, const ModelPart &rModelPart, const unsigned int BuffStep = 0 ) { KRATOS_TRY TDataType sum_value; AuxiliaryInitializeValue(sum_value); const auto &r_communicator = rModelPart.GetCommunicator(); const int n_nodes = r_communicator.LocalMesh().NumberOfNodes(); #pragma omp parallel firstprivate(n_nodes) { TDataType private_sum_value; AuxiliaryInitializeValue(private_sum_value); #pragma omp for for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = r_communicator.LocalMesh().NodesBegin() + i_node; private_sum_value += it_node->GetSolutionStepValue(rVariable, BuffStep); } AuxiliaryAtomicAdd(private_sum_value, sum_value); } return r_communicator.GetDataCommunicator().SumAll(sum_value); KRATOS_CATCH("") } /** * @brief Returns the condition value summation of a historical vector variable * @param rVar reference to the vector variable to be summed * @param rModelPart reference to the model part that contains the objective condition set * @return sum_value: summation result */ array_1d<double, 3> SumConditionVectorVariable( const ArrayVarType& rVar, const ModelPart& rModelPart ); /** * @brief Returns the condition value summation of a historical scalar variable * @param rVar reference to the scalar variable to be summed * @param rModelPart reference to the model part that contains the objective condition set * @return sum_value: summation result */ template< class TVarType > double SumConditionScalarVariable( const TVarType& rVar, const ModelPart& rModelPart ) { KRATOS_TRY double sum_value = 0.0; // Getting info const auto& r_communicator = 
rModelPart.GetCommunicator(); const auto& r_local_mesh = r_communicator.LocalMesh(); const auto& r_conditions_array = r_local_mesh.Conditions(); const auto it_cond_begin = r_conditions_array.begin(); #pragma omp parallel for reduction(+:sum_value) for (int k = 0; k < static_cast<int>(r_conditions_array.size()); ++k) { const auto it_cond = it_cond_begin + k; sum_value += it_cond->GetValue(rVar); } return r_communicator.GetDataCommunicator().SumAll(sum_value); KRATOS_CATCH("") } /** * @brief Returns the element value summation of a historical vector variable * @param rVar reference to the vector variable to be summed * @param rModelPart reference to the model part that contains the objective element set * @return sum_value: summation result */ array_1d<double, 3> SumElementVectorVariable( const ArrayVarType& rVar, const ModelPart& rModelPart ); /** * @brief Returns the element value summation of a historical scalar variable * @param rVar reference to the scalar variable to be summed * @param rModelPart reference to the model part that contains the objective element set * @return sum_value: summation result */ template< class TVarType > double SumElementScalarVariable( const TVarType& rVar, const ModelPart& rModelPart ) { KRATOS_TRY double sum_value = 0.0; // Getting info const auto& r_communicator = rModelPart.GetCommunicator(); const auto& r_local_mesh = r_communicator.LocalMesh(); const auto& r_elements_array = r_local_mesh.Elements(); const auto it_elem_begin = r_elements_array.begin(); #pragma omp parallel for reduction(+:sum_value) for (int k = 0; k < static_cast<int>(r_elements_array.size()); ++k) { const auto it_elem = it_elem_begin + k; sum_value += it_elem->GetValue(rVar); } return r_communicator.GetDataCommunicator().SumAll(sum_value); KRATOS_CATCH("") } /** * @brief This function add dofs to the nodes in a model part. 
It is useful since addition is done in parallel * @param rVar The variable to be added as DoF * @param rModelPart reference to the model part that contains the objective element set */ template< class TVarType > void AddDof( const TVarType& rVar, ModelPart& rModelPart ) { KRATOS_TRY // First we do a chek KRATOS_CHECK_VARIABLE_KEY(rVar) if(rModelPart.NumberOfNodes() != 0) KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar)) << "ERROR:: Variable : " << rVar << "not included in the Solution step data "; rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar); #pragma omp parallel for for (int k = 0; k < static_cast<int>(rModelPart.NumberOfNodes()); ++k) { auto it_node = rModelPart.NodesBegin() + k; it_node->AddDof(rVar); } KRATOS_CATCH("") } /** * @brief This function add dofs to the nodes in a model part. It is useful since addition is done in parallel * @param rVar The variable to be added as DoF * @param rReactionVar The corresponding reaction to the added DoF * @param rModelPart reference to the model part that contains the objective element set */ template< class TVarType > void AddDofWithReaction( const TVarType& rVar, const TVarType& rReactionVar, ModelPart& rModelPart ) { KRATOS_TRY KRATOS_CHECK_VARIABLE_KEY(rVar) KRATOS_CHECK_VARIABLE_KEY(rReactionVar) if(rModelPart.NumberOfNodes() != 0) { KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rVar)) << "ERROR:: DoF Variable : " << rVar << "not included in the Soluttion step data "; KRATOS_ERROR_IF_NOT(rModelPart.NodesBegin()->SolutionStepsDataHas(rReactionVar)) << "ERROR:: Reaction Variable : " << rReactionVar << "not included in the Soluttion step data "; } // If in debug we do a check for all nodes #ifdef KRATOS_DEBUG CheckVariableExists(rVar, rModelPart.Nodes()); CheckVariableExists(rReactionVar, rModelPart.Nodes()); #endif rModelPart.GetNodalSolutionStepVariablesList().AddDof(&rVar, &rReactionVar); #pragma omp parallel for for (int k = 0; k < 
static_cast<int>(rModelPart.NumberOfNodes()); ++k) { auto it_node = rModelPart.NodesBegin() + k; it_node->AddDof(rVar,rReactionVar); } KRATOS_CATCH("") } /** * @brief This method checks the variable keys * @return True if all the keys are correct */ bool CheckVariableKeys(); /** * @brief This method checks the dofs * @param rModelPart reference to the model part that contains the objective element set * @return True if all the DoFs are correct */ bool CheckDofs(ModelPart& rModelPart); /** * @brief This method updates the current nodal coordinates back to the initial coordinates * @param rNodes the nodes to be updated */ void UpdateCurrentToInitialConfiguration(const ModelPart::NodesContainerType& rNodes); /** * @param rNodes the nodes to be updated * @brief This method updates the initial nodal coordinates to the current coordinates */ void UpdateInitialToCurrentConfiguration(const ModelPart::NodesContainerType& rNodes); /** * @brief This method updates the current coordinates * For each node, this method takes the value of the provided variable and updates the * current position as the initial position (X0, Y0, Z0) plus such variable value * @param rNodes * @param rUpdateVariable variable to retrieve the updating values from */ void UpdateCurrentPosition( const ModelPart::NodesContainerType& rNodes, const ArrayVarType& rUpdateVariable = DISPLACEMENT, const IndexType BufferPosition = 0 ); ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ /** * @brief Auxiliary double initialize method * Auxiliary method to initialize a double value * @param rValue Variable to initialize */ void AuxiliaryInitializeValue(double &rValue); /** * @brief Auxiliary array initialize method * Auxiliary method to initialize an array value * @param rValue Variable to initialize */ void 
AuxiliaryInitializeValue(array_1d<double,3> &rValue); /** * @brief Auxiliary scalar reduce method * Auxiliary method to perform the reduction of a scalar value * @param rPrivateValue Private variable to reduce * @param rSumValue Variable to save the reduction */ void AuxiliaryAtomicAdd( const double &rPrivateValue, double &rSumValue ); /** * @brief Auxiliary array reduce method * Auxiliary method to perform the reduction of an array value * @param rPrivateValue Private variable to reduce * @param rSumValue Variable to save the reduction */ void AuxiliaryAtomicAdd( const array_1d<double,3> &rPrivateValue, array_1d<double,3> &rSumValue ); /** * @brief This is auxiliar method to check the keys * @return True if all the keys are OK */ template< class TVarType > bool CheckVariableKeysHelper() { KRATOS_TRY for (const auto& var : KratosComponents< TVarType >::GetComponents()) { if (var.first == "NONE" || var.first == "") std::cout << " var first is NONE or empty " << var.first << var.second << std::endl; if (var.second->Name() == "NONE" || var.second->Name() == "") std::cout << var.first << var.second << std::endl; if (var.first != var.second->Name()) //name of registration does not correspond to the var name std::cout << "Registration Name = " << var.first << " Variable Name = " << std::endl; KRATOS_ERROR_IF((var.second)->Key() == 0) << (var.second)->Name() << " Key is 0." << std::endl \ << "Check that Kratos variables have been correctly registered and all required applications have been imported." 
<< std::endl; } return true; KRATOS_CATCH("") } template <class TContainerType> TContainerType& GetContainer(ModelPart& rModelPart); template <class TContainerType> const TContainerType& GetContainer(const ModelPart& rModelPart); template <class TContainerType, class TSetterFunction, class TGetterFunction> void CopyModelPartFlaggedVariable( const ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, const Flags& rFlag, const bool CheckValue, TSetterFunction&& rSetterFunction, TGetterFunction&& rGetterFunction) { KRATOS_TRY const auto& r_origin_container = GetContainer<TContainerType>(rOriginModelPart); auto& r_destination_container = GetContainer<TContainerType>(rDestinationModelPart); const int number_of_origin_items = r_origin_container.size(); const int number_of_destination_items = r_destination_container.size(); KRATOS_ERROR_IF_NOT(number_of_origin_items == number_of_destination_items) << "Origin ( " << rOriginModelPart.Name() << " ) and destination ( " << rDestinationModelPart.Name() << " ) model parts have different number of items." << "\n\t- Number of origin items: " << number_of_origin_items << "\n\t- Number of destination items: " << number_of_destination_items << std::endl; IndexPartition<int>(number_of_origin_items).for_each([&](int i_node) { const auto& r_orig_item = *(r_origin_container.begin() + i_node); auto& r_dest_item = *(r_destination_container.begin() + i_node); if (r_orig_item.Is(rFlag) == CheckValue) { rSetterFunction(r_dest_item, rGetterFunction(r_orig_item)); } }); KRATOS_CATCH(""); } ///@} ///@name Private Acces ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class VariableUtils */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_VARIABLE_UTILS defined */
// ===== mesh.h =====
/////////////////////////////////////////////////////////////////////////////// // This file is part of ShapeOp, a lightweight C++ library // for static and dynamic geometry processing. // // Copyright (C) 2014 Sofien Bouaziz <sofien.bouaziz@gmail.com> // Copyright (C) 2014 LGG EPFL // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. /////////////////////////////////////////////////////////////////////////////// #ifndef MESH_H #define MESH_H /////////////////////////////////////////////////////////////////////////////// // #include <Types.h> #include <time.h> #include <vector> #include <GL/glew.h> #include <GLFW/glfw3.h> #include "Utils/PDPJConverter.h" // #include "GL/glut.h" // #include "GL/gl.h" #include "Demos/Visualization/Visualization.h" // #include <GLFW/glfw3.h> // #include <OpenGP/SurfaceMesh/SurfaceMesh.h> #include "OpenGP/SurfaceMesh/IO/IO.h" #include "OpenGP/SurfaceMesh/SurfaceMesh.h" // #include <OpenGP/SurfaceMesh/IO/IO.cpp> // #include <OpenGP/SurfaceMesh/SurfaceMesh.cpp> // #include <OpenGP/IO.h> #include "ShapeOpGL/helper.h" #include "libShapeOp/src/Solver.h" #include "libShapeOp/src/HRSolver.h" #include "libShapeOp/src/Constraint.h" #include "libShapeOp/src/Force.h" /////////////////////////////////////////////////////////////////////////////// /** \brief An abstract base class for renderable objects.*/ class Object { public: Object(const Eigen::Matrix4f &model, const Eigen::Vector3f &color) : model_(model), color_(color) {} virtual ~Object() {} virtual void display() = 0; void setModel(const Eigen::Matrix4f &model) { model_ = model; } const Eigen::Matrix4f &getModel() const { return model_; } const Eigen::Vector3f &getColor() const { return color_; } protected: Eigen::Matrix4f model_; Eigen::Vector3f color_; GLuint vertexArrayID_; GLuint vertexBuffer_; GLuint normalBuffer_; int numIndices_; }; 
/////////////////////////////////////////////////////////////////////////////// /** \brief A renderable Plane.*/ class Plane : public Object { public: Plane(const Eigen::Vector3f &color = Eigen::Vector3f(1.0f, 0.0f, 0.0f)) : Object(Eigen::Matrix4f::Identity(), color) { init(); } void display() { // glBindVertexArray(vertexArrayID_); glDrawArrays(GL_TRIANGLES, 0, 6); } private: void init() { /// Vertex Array // glGenVertexArrays(1, &vertexArrayID_); // glBindVertexArray(vertexArrayID_); /// Vertex Buffer GLfloat position[] = { -1.0f, 0.0f, -1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, }; GLfloat normal[] = { 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, }; // glGenBuffers(1, &vertexBuffer_); // glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_); // glBufferData(GL_ARRAY_BUFFER, sizeof(position) , position, GL_STATIC_DRAW); // /// Vertex Attribute ID // glEnableVertexAttribArray(0); // glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_); // glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0); // glGenBuffers(1, &normalBuffer_); // glBindBuffer(GL_ARRAY_BUFFER, normalBuffer_); // glBufferData(GL_ARRAY_BUFFER, sizeof(normal) , normal, GL_STATIC_DRAW); // /// Vertex Attribute ID // glEnableVertexAttribArray(1); // glBindBuffer(GL_ARRAY_BUFFER, normalBuffer_); // glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0); } }; /////////////////////////////////////////////////////////////////////////////// /** \brief A renderable Cube.*/ class Cube : public Object { public: Cube(const Eigen::Vector3f &color = Eigen::Vector3f(1.0f, 0.0f, 0.0f)) : Object(Eigen::Matrix4f::Identity(), color) { init(); } void display() { glBindVertexArray(vertexArrayID_); glDrawArrays(GL_TRIANGLES, 0, 36); } private: void init() { /// Vertex Array // glGenVertexArrays(1, &vertexArrayID_); // glBindVertexArray(vertexArrayID_); /// Vertex Buffer GLfloat position[] = { -1.0f, 1.0f, -1.0f, 1.0f, 
1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, -1.0f, 1.0f, -1.0f, 1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f, 1.0f, -1.0f, -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, }; GLfloat normal[] = { 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, -1.0f, }; // glGenBuffers(1, &vertexBuffer_); // glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_); // glBufferData(GL_ARRAY_BUFFER, sizeof(position) , position, GL_STATIC_DRAW); // /// Vertex Attribute ID // glEnableVertexAttribArray(0); // glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_); // glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0); // glGenBuffers(1, &normalBuffer_); // glBindBuffer(GL_ARRAY_BUFFER, normalBuffer_); // glBufferData(GL_ARRAY_BUFFER, sizeof(normal) , normal, GL_STATIC_DRAW); // /// Vertex Attribute ID // glEnableVertexAttribArray(1); // glBindBuffer(GL_ARRAY_BUFFER, normalBuffer_); // glVertexAttribPointer(1, 3, 
GL_FLOAT, GL_FALSE, 0, 0); } }; /////////////////////////////////////////////////////////////////////////////// /** \brief A renderable SurfaceMesh.*/ class GLSurfaceMeshObject : public Object { public: GLSurfaceMeshObject(const std::string &file, const Eigen::Vector3f &color = Eigen::Vector3f(1.0f, 0.0f, 0.0f), bool update = true) : Object(Eigen::Matrix4f::Identity(), color) { OpenGP::read_mesh(mesh_, file); std::cout << "ReadMesh..." << std::endl; init(update); } virtual void display() override final { // glBindVertexArray(vertexArrayID_); // glDrawElements(GL_TRIANGLES, numIndices_, GL_UNSIGNED_INT, 0); } void update() { auto vnormal = mesh_.get_vertex_property<OpenGP::Vec3>("v:normal"); #pragma omp parallel for for (int i = 0; i < static_cast<int>(mesh_.n_vertices()); ++i) vnormal[OpenGP::SurfaceMesh::Vertex(i)] = mesh_.compute_vertex_normal(OpenGP::SurfaceMesh::Vertex(i)); // glBindVertexArray(vertexArrayID_); // updateVertexBuffer(false); // updateNormalBuffer(false); } OpenGP::SurfaceMesh &getMesh() { return mesh_; } private: void updateVertexBuffer(bool malloc = true) { // auto vpoints = mesh_.get_vertex_property<OpenGP::Vec3>("v:point"); // glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_); // if (malloc) { // glBufferData(GL_ARRAY_BUFFER, mesh_.n_vertices() * sizeof(OpenGP::Vec3), vpoints.data(), GL_STATIC_DRAW); // } else { // GLfloat *data = (GLfloat *) glMapBuffer(GL_ARRAY_BUFFER, GL_READ_WRITE); // float *d = (float *) vpoints.data(); // #pragma omp parallel for // for (int i = 0; i < static_cast<int>(mesh_.n_vertices() * 3); ++i) { // data[i] = d[i]; // } // glUnmapBuffer(GL_ARRAY_BUFFER); // } } void updateNormalBuffer(bool malloc = true) { // auto vnormals = mesh_.get_vertex_property<OpenGP::Vec3>("v:normal"); // glBindBuffer(GL_ARRAY_BUFFER, normalBuffer_); // if (malloc) { // glBufferData(GL_ARRAY_BUFFER, mesh_.n_vertices() * sizeof(OpenGP::Vec3), vnormals.data(), GL_STATIC_DRAW); // } else { // GLfloat *data = (GLfloat *) glMapBuffer(GL_ARRAY_BUFFER, 
GL_READ_WRITE); // float *d = (float *) vnormals.data(); // #pragma omp parallel for // for (int i = 0; i < static_cast<int>(mesh_.n_vertices() * 3); ++i) { // data[i] = d[i]; // } // glUnmapBuffer(GL_ARRAY_BUFFER); // } } void init(bool update = true) { if (update) mesh_.update_vertex_normals(); // /// Vertex Array // glGenVertexArrays(1, &vertexArrayID_); // glBindVertexArray(vertexArrayID_); // /// Vertex Buffer // glGenBuffers(1, &vertexBuffer_); // updateVertexBuffer(); // /// Vertex Attribute ID // glEnableVertexAttribArray(0); // glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0); // /// Normal Buffer // glGenBuffers(1, &normalBuffer_); // updateNormalBuffer(); // /// Vertex Attribute ID // glEnableVertexAttribArray(1); // glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0); /// Index Buffer std::vector<unsigned int> indices; for (auto fit = mesh_.faces_begin(); fit != mesh_.faces_end(); ++fit) { unsigned int n = mesh_.valence(*fit); auto vit = mesh_.vertices(*fit); for (unsigned int v = 0; v < n; ++v) { indices.push_back((*vit).idx()); ++vit; } } numIndices_ = static_cast<int>(indices.size()); GLuint triangleBuffer; // glGenBuffers(1, &triangleBuffer); // glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, triangleBuffer); // glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned int), indices.data(), GL_STATIC_DRAW); } OpenGP::SurfaceMesh mesh_; }; /////////////////////////////////////////////////////////////////////////////// /** \brief A renderable Fullscreenquad.*/ class FullScreenQuad { public: FullScreenQuad() { init(); } void display() { // glBindVertexArray(vertexArrayID_); // glDrawArrays(GL_TRIANGLES, 0, 6); } private: void init() { /// Vertex Array // glGenVertexArrays(1, &vertexArrayID_); // glBindVertexArray(vertexArrayID_); /// Vertex Buffer GLfloat quad[] = { -1.0f, -1.0f, 0.0f, 1.0f, -1.0f, 0.0f, -1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, }; GLuint vertexBuffer; // glGenBuffers(1, &vertexBuffer); // 
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer); // glBufferData(GL_ARRAY_BUFFER, sizeof(quad) , quad, GL_STATIC_DRAW); // /// Vertex Attribute ID // glEnableVertexAttribArray(0); // glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer); // glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0); } GLuint vertexArrayID_; }; /////////////////////////////////////////////////////////////////////////////// /** \brief A SurfaceMesh with additional capabilities to use ShapeOp.*/ class ShapeOpSurfaceMesh { public: ShapeOpSurfaceMesh(const std::shared_ptr<GLSurfaceMeshObject> &mesh, // ShapeOp::Solver& solver, ShapeOp::Scalar timestep = 0.1, double damping = 1.0, double masses = 1.0, double TriangleConstraint = 1000, ShapeOp::Scalar bendind = 10.0, ShapeOp::Scalar stretching = 10, ShapeOp::Scalar closeness = 10 ) : mesh_(mesh) { init(timestep, damping, masses, TriangleConstraint, 10,1,1); t1 = clock(); } void init(ShapeOp::Scalar timestep, double damping, double masses, double TriangleConstraint, ShapeOp::Scalar closeness, ShapeOp::Scalar stretching, ShapeOp::Scalar bendind) { OpenGP::SurfaceMesh &m = mesh_->getMesh(); auto vpoints = m.get_vertex_property<OpenGP::Vec3>("v:point"); Eigen::Map<Eigen::Matrix3Xf> vertice((float *)(vpoints.data()), 3, m.n_vertices()); ShapeOp::Matrix3X p = vertice.cast<ShapeOp::Scalar>(); solver_.setPoints(p);//kokonisakanoboru // addCloseness(closeness); solver_.addForces(std::make_shared<ShapeOp::GravityForce>(ShapeOp::Vector3(0.0, -10.0, 0.0))); // fId_ = solver_.addForces(std::make_shared<ShapeOp::VertexForce>(ShapeOp::Vector3(0.0, 0.0, 1000.0), -1)); // solver_.initialize(outputfile, true, masses, damping, timestep);//kokoppoi } void initsolver(bool dynamic, float masses, float damping, float timestep){ solver_.initialize(true, masses, damping, timestep);//kokoppoi } void initHR(SparseMatrix U){ // solver_.initHR(U);//kokoppoi } void precompute_error(ShapeOp::Scalar max_itr, float delta, int substeps, std::vector<float>& en) { solver_.init_alternative(); 
for(int i=0; i<substeps; i++){ float en1 = solver_.alt_pre_solve(max_itr, delta/2, 2); float en2 = solver_.alt_solve(max_itr, delta); en.push_back(en1 - en2); } } void precompute_double(ShapeOp::Scalar max_itr, float delta, int substeps, int div){ solver_.init_alternative(); for(int i=0; i<div; i++){ solver_.alt_solve(max_itr, delta/div); } } void hard_pin(int pin){ solver_.hard_pin(pin); } const ShapeOp::Matrix3X& getVelocities(){ return solver_.getVelocities(); } void process(ShapeOp::Scalar max_itr, int image_itr, float delta) { solver_.solve(max_itr, delta);//koko } void pre_process(ShapeOp::Scalar max_itr, float delta, int substeps){ solver_.init_alternative(); for(int i=0; i<substeps; i++){ float en2 = solver_.alt_solve(max_itr, delta); } } const ShapeOp::Matrix3X& getSolver2(){ return solver_.getPoints2(); } void process(ShapeOp::Scalar max_itr, int image_itr) { solver_.solve(max_itr);//koko } float compute_energy(){ return (float)solver_.compute_energy(); } void enableForce(int id) { auto c = std::dynamic_pointer_cast<ShapeOp::VertexForce>(solver_.getForce(fId_)); //TODO: this will need to be robustify c->setId(id); } void rotate(ShapeOp::Scalar a) { Eigen::AngleAxis<ShapeOp::Scalar> aa(a, ShapeOp::Vector3::UnitY()); #pragma omp parallel for for (int i = 0; i < static_cast<int>(mesh_->getMesh().n_vertices()); ++i) { auto c = std::dynamic_pointer_cast<ShapeOp::ClosenessConstraint>(solver_.getConstraint(i)); ShapeOp::Vector3 p = aa * c->getPosition(); c->setPosition(p); } } const ShapeOp::Matrix3X& getSolver(){ return solver_.getPoints(); } void dummy_process(float delta){ solver_. 
dummy_solve(delta); } // void comp_process(ShapeOp::Scalar max_itr, int image_itr, float delta){ // solver_.comp_solve(max_itr, delta); // } // float ComputeConstraintError(){ // return solver_.ComputeConstraintError(); // } // void process(bool itr_or_time){ // solver_.solve_step(itr_or_time);//koko // } // void vel_correction(float steptime, int div){ // solver_.vel_correction(steptime, div); // } public: void addStretching(ShapeOp::Scalar stretching) { OpenGP::SurfaceMesh &m = mesh_->getMesh(); auto f = std::bind(&ShapeOpSurfaceMesh::strainConstraint, this, stretching, std::placeholders::_1); edgeFuntional(m, f); } void strainConstraint(ShapeOp::Scalar weight, const std::vector<int> &id) { auto c = std::make_shared<ShapeOp::EdgeStrainConstraint>(id, weight, solver_.getPoints()); solver_.addConstraint(c); } void addTriangleStrain(ShapeOp::Scalar TriangleStrain) { OpenGP::SurfaceMesh &m = mesh_->getMesh(); auto f = std::bind(&ShapeOpSurfaceMesh::triangleStrainConstraint, this, TriangleStrain, std::placeholders::_1); polygonFuntional(m, f); } void triangleStrainConstraint(ShapeOp::Scalar weight, const std::vector<int> &id) { auto c = std::make_shared<ShapeOp::TriangleStrainConstraint>(id, weight, solver_.getPoints()); solver_.addConstraint(c); } void addPin(ShapeOp::Scalar closeness, int pin){ std::vector<int> idI; idI.push_back(pin); auto handles = std::make_shared<ShapeOp::ClosenessConstraint>(idI, closeness, solver_.getPoints()); solver_.addConstraint(handles); } void addChain(ShapeOp::Scalar chain){ // std::vector<int> idI; // idI.push_back(pin); // auto handles = std::make_shared<ShapeOp::ClosenessConstraint>(idI, closeness, solver_.getPoints()); // solver_.addConstraint(handles); for (int i=0; i<solver_.getPoints().cols()-1; i++) { // auto he = mesh.halfedge(*eit, 0); std::vector<int> id(2); id[0] = i; id[1] = i+1; // f(id); auto c = std::make_shared<ShapeOp::EdgeStrainConstraint>(id, chain, solver_.getPoints()); solver_.addConstraint(c); } } void 
addCloseness(ShapeOp::Scalar closeness) { OpenGP::SurfaceMesh &m = mesh_->getMesh(); auto vertices = m.vertex_property<OpenGP::Vec3>("v:point"); Eigen::Map<Eigen::Matrix3Xf> positions = (Eigen::Map<Eigen::Matrix3Xf>((float *)(vertices.data()), 3, m.n_vertices())); for (int i = 0; i < positions.cols(); ++i) { std::vector<int> idI; idI.push_back(i); auto handles = std::make_shared<ShapeOp::ClosenessConstraint>(idI, closeness, solver_.getPoints()); solver_.addConstraint(handles); } } void addBending(ShapeOp::Scalar bending) { OpenGP::SurfaceMesh &m = mesh_->getMesh(); auto f = std::bind(&ShapeOpSurfaceMesh::bendingConstraint, this, bending, std::placeholders::_1); trianglePairFuntional(m, f); } void bendingConstraint(ShapeOp::Scalar weight, const std::vector<int> &id) { auto c = std::make_shared<ShapeOp::BendingConstraint>(id, weight, solver_.getPoints()); solver_.addConstraint(c); } void addVolumeConstraint(ShapeOp::Scalar volume, std::string filename){ std::vector<std::vector<int>> ids; read_obj_tet(filename, ids); std::cout << "obj_tet" << ids.size() << std::endl; for(int i=0; i<ids.size(); i++){ std::vector<int> idI; idI.push_back(ids[i][0]); idI.push_back(ids[i][1]); idI.push_back(ids[i][2]); idI.push_back(ids[i][3]); auto c = std::make_shared<ShapeOp::VolumeConstraint>(idI, volume, solver_.getPoints()); solver_.addConstraint(c); } } // ShapeOp::Solver& Solver(){ // return solver_; // } std::shared_ptr<GLSurfaceMeshObject> mesh_; // ShapeOp::HRSolver solver_; ShapeOp::Solver solver_; decltype(solver_)& Solver(){ return solver_; } int fId_; int count_= 0; clock_t t1; }; /////////////////////////////////////////////////////////////////////////////// #endif ///////////////////////////////////////////////////////////////////////////////
OpenMP_ArraySum.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Number of elements to sum (was the magic number 100 repeated four times). */
enum { N = 100 };

/*
 * Sum the first n elements of a[] in parallel.
 *
 * Each OpenMP thread accumulates a private partial sum; the
 * reduction(+:sum) clause combines the partials when the loop ends,
 * so no explicit synchronization is needed.
 */
static int array_sum(const int *a, int n)
{
    int i;
    int sum = 0;

#pragma omp parallel for reduction(+:sum)
    for (i = 0; i < n; i++)
        sum += a[i];
    return sum;
}

/*
 * Demo driver: fill an array with pseudo-random values (fixed seed 1234
 * so runs are reproducible), compute the parallel sum, then print the
 * elements and the total in the same format as before.
 */
int main(void)
{
    int A[N];
    int i;
    int sum;

    srand(1234);
    for (i = 0; i < N; i++)
        A[i] = rand() % 1000;

    sum = array_sum(A, N);

    for (i = 0; i < N; i++)
        printf("A[%d]:%d\n", i, A[i]);
    printf("Total sum = %d\n", sum);
    return 0;
}
HDAA_fmt_plug.c
/* HTTP Digest access authentication patch for john
 *
 * Written by Romain Raboin. OMP and intrinsics support by magnum
 *
 * This software is Copyright (c) 2008 Romain Raboin - romain.raboin at
 * gmail.com, and Copyright (c) 2012 magnum and it is hereby released to
 * the general public under the following terms: Redistribution and
 * use in source and binary forms, with or without modification, are
 * permitted.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_HDAA;
#elif FMT_REGISTERS_H
john_register_one(&fmt_HDAA);
#else

#include <string.h>

#ifdef __MMX__
#include <mmintrin.h>
#endif

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "md5.h"
#include "stdint.h"
#include "simd-intrinsics.h"

#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME

#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "memdbg.h"

#define FORMAT_LABEL "hdaa"
#define FORMAT_NAME "HTTP Digest access authentication"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(reqinfo_t)
#define SALT_ALIGN 4

#if defined(_OPENMP)
static unsigned int omp_t = 1;
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#endif

#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
/* Map a (byte offset, candidate index) pair into the interleaved SIMD
   input buffer (64-byte MD5 blocks) / output buffer (16-byte digests). */
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&0x1c)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

#define SEPARATOR '$'
#define MAGIC "$response$"
#define SIZE_TAB 12

// This is 8 x 64 bytes, so in MMX/SSE2 we support up to 9 limbs of MD5
#define HTMP 512

/* Per-salt scratch: the fixed "user:realm:" prefix of H(A1) input and the
   fixed ":nonce:nc:cnonce:qop:H(A2)" suffix of the response input, with
   their lengths, so crypt_all only has to splice in the candidate pieces. */
typedef struct {
	size_t h1tmplen;
	size_t h3tmplen;
	char h1tmp[HTMP];
	char h3tmp[HTMP];
} reqinfo_t;

/*
  digest authentication scheme :
  h1 = md5(user:realm:password)
  h2 = md5(method:digestURI)
  response = h3 = md5(h1:nonce:nonceCount:ClientNonce:qop:h2)
*/

/* request information */
enum e_req {
	R_RESPONSE,
	R_USER,
	R_REALM,
	R_METHOD,
	R_URI,
	R_NONCE,
	R_NONCECOUNT,
	R_CLIENTNONCE,
	R_QOP
};

/* response:user:realm:method:uri:nonce:nonceCount:ClientNonce:qop */
static struct fmt_tests tests[] = {
	{"$response$679066476e67b5c7c4e88f04be567f8b$user$myrealm$GET$/$8c12bd8f728afe56d45a0ce846b70e5a$00000001$4b61913cec32e2c9$auth", "nocode"},
	{"$response$faa6cb7d676e5b7c17fcbf966436aa0c$moi$myrealm$GET$/$af32592775d27b1cd06356b3a0db9ddf$00000001$8e1d49754a25aea7$auth", "kikou"},
	{"$response$56940f87f1f53ade8b7d3c5a102c2bf3$usrx$teN__chars$GET$/4TLHS1TMN9cfsbqSUAdTG3CRq7qtXMptnYfn7mIIi3HRKOMhOks56e$2c0366dcbc$00000001$0153$auth", "passWOrd"},
	{NULL}
};

/* used by set_key */
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];

#ifdef SIMD_COEF_32
/* One 64-byte MD5 block ("limb") per entry, interleaved across lanes. */
#define LIMBS 9
static unsigned char *saved_key[LIMBS];
static unsigned int *interm_key;
static unsigned int *crypt_key;
#else
static int (*saved_len);
static unsigned char (*crypt_key)[BINARY_SIZE];
#endif

/* Store information about the request ()*/
static reqinfo_t *rinfo = NULL;

/* Allocate the per-candidate buffers, scaled up for OpenMP threads. */
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_32
	int i;
#endif
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	for (i = 0; i < LIMBS; i++)
		saved_key[i] = mem_calloc_align(self->params.max_keys_per_crypt, 64, MEM_ALIGN_SIMD);
	interm_key = mem_calloc_align(self->params.max_keys_per_crypt, 16, MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt, 16, MEM_ALIGN_SIMD);
#else
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain));
}

/* Release everything init() allocated (mirrors its #ifdef structure). */
static void done(void)
{
#ifdef SIMD_COEF_32
	int i;
#endif
	MEM_FREE(saved_plain);
	MEM_FREE(crypt_key);
#ifdef SIMD_COEF_32
	MEM_FREE(interm_key);
	for (i = 0; i < LIMBS; i++)
		MEM_FREE(saved_key[i]);
#else
	MEM_FREE(saved_len);
#endif
}

/* Validate a ciphertext line: "$response$" magic followed by exactly nine
   '$'-separated fields; hash/nonce/cnonce must be lowercase hex. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;

	if (strncmp(ciphertext, MAGIC, sizeof(MAGIC) - 1) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += sizeof(MAGIC)-1;
	if ((p = strtokm(ctcopy, "$")) == NULL) /* hash */
		goto err;
	if (!ishexlc(p) || strlen(p) != 32)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* user */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* realm */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* method */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* uri */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* nonce */
		goto err;
	if (!ishexlc(p) )
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* noncecount */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* clientnonce */
		goto err;
	if (!ishexlc(p) )
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* qop */
		goto err;
	if ((p = strtokm(NULL, "$")) != NULL)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

static void set_salt(void *salt)
{
	rinfo = salt;
}

/* NOTE(review): strcpy with no bound — presumably the caller guarantees
   key <= PLAINTEXT_LENGTH; verify against the format framework contract. */
static void set_key(char *key, int index)
{
	strcpy(saved_plain[index], key);
#ifndef SIMD_COEF_32
	saved_len[index] = -1;	/* lazy strlen in crypt_all */
#endif
}

static char *get_key(int index)
{
	return saved_plain[index];
}

/* Quick reject: does any computed digest's first 32 bits match? */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int x,y=0;

#ifdef _OPENMP
	for(; y < SIMD_PARA_MD5 * omp_t; y++)
#else
	for(; y < SIMD_PARA_MD5; y++)
#endif
		for(x = 0; x < SIMD_COEF_32; x++) {
			if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+x] )
				return 1;
		}
	return 0;
#else
	int index;

	for (index = 0; index < count; index++)
		if (!(memcmp(binary, crypt_key[index], BINARY_SIZE)))
			return 1;
	return 0;
#endif
}

/* Full 16-byte digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int i,x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	for(i=0;i<(BINARY_SIZE/4);i++)
		if ( ((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+i*SIMD_COEF_32+x] )
			return 0;
	return 1;
#else
	return !(memcmp(binary, crypt_key[index], BINARY_SIZE));
#endif
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* convert hash from binary to ascii */
#ifdef SIMD_COEF_32
// This code should be rewritten in intrinsics, reading from
// MMX or SSE2 output buffers and writing to MMX/SSE2 input buffers.
/* Hex-encode each lane's digest in place inside the interleaved buffers;
   the arithmetic trick maps each nibble to its ASCII hex digit. */
static inline void sse_bin2ascii(unsigned char *conv, unsigned char *src)
{
	unsigned int index;

	for (index = 0; index < NBKEYS; index++) {
		unsigned int i, j = 0;

		for (i = 0; i < BINARY_SIZE; i += 2) {
			unsigned int t;

			t = (src[GETOUTPOS((i + 1), index)] & 0x0f);
			t <<= 12;
			t |= (src[GETOUTPOS((i + 1), index)] & 0xf0);
			t <<= 4;
			t |= (src[GETOUTPOS(i, index)] & 0x0f);
			t <<= 8;
			t |= ((src[GETOUTPOS(i, index)] & 0xf0) >> 4);
			t += 0x06060606;
			t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a);
			*(unsigned int*)&conv[GETPOS(j, index)] = t;
			j+=4;
		}
	}
}
#endif /* SIMD_COEF_32 */

#ifdef __MMX__
/* MMX variant of the binary->lowercase-hex conversion (4 x 64-bit ops). */
static inline void bin2ascii(__m64 *conv, __m64 *src)
{
	unsigned int i = 0;

	while (i != 4) {
		__m64 l;
		__m64 r;
		__m64 t;
		__m64 u;
		__m64 v;

		/* 32 bits to 64 bits */
		t = _mm_set1_pi32(0x0f0f0f0f);
		/* Bit-wise AND the 64-bit values in M1 and M2. */
		u = _mm_and_si64(_mm_srli_si64(src[(i / 2)], 4), t);
		v = _mm_and_si64(src[(i / 2)], t);
		/* interleaving */
		l = _mm_unpacklo_pi8(u, v);
		r = _mm_unpackhi_pi8(u, v);

		t = _mm_set1_pi32(0x06060606);
		l = _mm_add_pi32(l, t);
		r = _mm_add_pi32(r, t);
		t = _mm_set1_pi32(0x01010101);
		/* u = (l << 4) & t */
		u = _mm_and_si64(_mm_srli_si64(l, 4), t);
		/* v = (r << 4) & t */
		v = _mm_and_si64(_mm_srli_si64(r, 4), t);
		t = _mm_set1_pi32(0x00270027);
		/* Multiply four 16-bit values in M1 by four 16-bit values in M2
		   and produce the low 16 bits of the results. */
		u = _mm_mullo_pi16(u, t);
		v = _mm_mullo_pi16(v, t);
		t = _mm_set1_pi32(0x2a2a2a2a);
		u = _mm_add_pi32(u, t);
		v = _mm_add_pi32(v, t);

		conv[(i++)] = _mm_add_pi32(l, u);
		conv[(i++)] = _mm_add_pi32(r, v);
	}
	__asm__ __volatile__("emms");
}
#else
/* Portable variant: hex-encode 16 digest bytes into 32 ASCII chars,
   handling both endiannesses. */
static inline void bin2ascii(uint32_t *conv, uint32_t *source)
{
	unsigned char *src = (unsigned char*)source;
	unsigned int i;
	unsigned int j = 0;
	uint32_t t = 0;

	for (i = 0; i < BINARY_SIZE; i += 2) {
#if (ARCH_LITTLE_ENDIAN == 0)
		t = (src[i] & 0xf0);
		t *= 0x10;
		t += (src[i] & 0x0f);
		t *= 0x1000;
		t += (src[(i + 1)] & 0xf0);
		t *= 0x10;
		t += (src[(i + 1)] & 0x0f);
#else
		t = (src[(i + 1)] & 0x0f);
		t *= 0x1000;
		t += (src[(i + 1)] & 0xf0);
		t *= 0x10;
		t += (src[i] & 0x0f);
		t *= 0x100;
		t += ((src[i] & 0xf0) >> 4);
#endif
		t += 0x06060606;
		t += ((((t >> 4) & 0x01010101) * 0x27) + 0x2a2a2a2a);
		conv[(j++)] = t;
	}
}
#endif /* MMX */

#if SIMD_COEF_32
/* Copy one finished lane's digest from the interleaved intermediate
   buffer into the interleaved result buffer. */
static inline void crypt_done(unsigned const int *source, unsigned int *dest, int index)
{
	unsigned int i;
	unsigned const int *s = &source[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32];
	unsigned int *d = &dest[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32];

	for (i = 0; i < BINARY_SIZE / 4; i++) {
		*d = *s;
		s += SIMD_COEF_32;
		d += SIMD_COEF_32;
	}
}
#endif

/* Compute h3 = MD5(hex(MD5(user:realm:password)):nonce:...:h2) for every
   candidate. SIMD path builds interleaved multi-limb MD5 inputs and
   harvests each lane as soon as its message length is fully hashed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
#if SIMD_COEF_32
#if defined(_OPENMP)
#define ti (thread*NBKEYS+index)
	int thread;

#pragma omp parallel for
	for (thread = 0; thread < (count+NBKEYS-1)/NBKEYS; thread++)
#else
#define thread 0
#define ti index
#endif
	{
		static unsigned int crypt_len[NBKEYS];
		unsigned int index, i, shortest, longest;

		/* Pass 1: h1 input = "user:realm:" + password, one MD5 block. */
		for (index = 0; index < NBKEYS; index++) {
			int len;
			char temp;
			const char *key;

			key = rinfo->h1tmp;
			for (len = 0; len < rinfo->h1tmplen; len += 4, key += 4)
				*(ARCH_WORD_32*)&saved_key[len>>6][GETPOS(len, ti)] = *(ARCH_WORD_32*)key;
			len = rinfo->h1tmplen;
			key = (char*)&saved_plain[ti];
			while((temp = *key++)) {
				saved_key[len>>6][GETPOS(len, ti)] = temp;
				len++;
			}
			saved_key[len>>6][GETPOS(len, ti)] = 0x80;

			// Clean rest of this buffer
			i = len;
			while (++i & 3)
				saved_key[i>>6][GETPOS(i, ti)] = 0;
			for (; i < (((len+8)>>6)+1)*64; i += 4)
				*(ARCH_WORD_32*)&saved_key[i>>6][GETPOS(i, ti)] = 0;
			/* MD5 length field (bits) in the last limb of this message */
			((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3;
		}
		SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &crypt_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN);
		/* hex(h1) lands directly at the front of the next message */
		sse_bin2ascii((unsigned char*)&saved_key[0][thread*64*NBKEYS], (unsigned char*)&crypt_key[thread*4*NBKEYS]);

		/* Pass 2: append the salt's ":nonce:...:h2" suffix after hex(h1). */
		longest = 0; shortest = HTMP;
		for (index = 0; index < NBKEYS; index++) {
			const char *key;
			int i, len;

			len = CIPHERTEXT_LENGTH - 1;
			key = rinfo->h3tmp + CIPHERTEXT_LENGTH;

			// Copy a char at a time until aligned at destination
			while (++len & 3)
				saved_key[len>>6][GETPOS(len, ti)] = *key++;

			// ...then a word at a time. This is a good boost, we are copying over 100 bytes.
			for (;len < rinfo->h3tmplen; len += 4, key += 4)
				*(ARCH_WORD_32*)&saved_key[len>>6][GETPOS(len, ti)] = *(ARCH_WORD_32*)key;
			len = rinfo->h3tmplen;
			saved_key[len>>6][GETPOS(len, ti)] = 0x80;

			// Clean rest of this buffer
			i = len;
			while (++i & 3)
				saved_key[i>>6][GETPOS(i, ti)] = 0;

			//for (; i < (((len+8)>>6)+1)*64; i += 4)
			for (; i <= crypt_len[index]; i += 4)
				*(ARCH_WORD_32*)&saved_key[i>>6][GETPOS(i, ti)] = 0;

			((unsigned int *)saved_key[(len+8)>>6])[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (ti/SIMD_COEF_32)*16*SIMD_COEF_32] = len << 3;
			crypt_len[index] = len;
			if (len > longest)
				longest = len;
			if (len < shortest)
				shortest = len;
		}

		// First limb
		SIMDmd5body(&saved_key[0][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], NULL, SSEi_MIXED_IN);
		// Copy any output that is done now
		if (shortest < 56) {
			if (longest < 56)
				memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS);
			else
				for (index = 0; index < NBKEYS; index++)
					if (crypt_len[index] < 56)
						crypt_done(interm_key, crypt_key, ti);
		}
		// Do the rest of the limbs
		for (i = 1; i < (((longest + 8) >> 6) + 1); i++) {
			SIMDmd5body(&saved_key[i][thread*64*NBKEYS], &interm_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], SSEi_RELOAD|SSEi_MIXED_IN);
			// Copy any output that is done now
			if (shortest < i*64+56) {
				if (shortest > (i-1)*64+55 && longest < i*64+56)
					memcpy(&crypt_key[thread*4*NBKEYS], &interm_key[thread*4*NBKEYS], 16*NBKEYS);
				else
					for (index = 0; index < NBKEYS; index++)
						if (((crypt_len[index] + 8) >> 6) == i)
							crypt_done(interm_key, crypt_key, ti);
			}
		}
	}
#undef thread
#undef ti
#else
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		MD5_CTX ctx;
		int len;
#ifdef _OPENMP
		/* thread-private copies so each iteration can splice in its key */
		char h3tmp[HTMP];
		char h1tmp[HTMP];
#else
		char *h3tmp;
		char *h1tmp;
#endif
		size_t tmp;
#ifdef __MMX__
		__m64 h1[BINARY_SIZE / sizeof(__m64)];
		__m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1];
#else
		uint32_t h1[BINARY_SIZE / sizeof(uint32_t)];
		uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1];
#endif

		tmp = rinfo->h1tmplen;
		if ((len = saved_len[index]) < 0)
			len = saved_len[index] = strlen(saved_plain[index]);

#ifdef _OPENMP
		memcpy(h1tmp, rinfo->h1tmp, tmp);
		memcpy(h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmp + CIPHERTEXT_LENGTH, rinfo->h3tmplen - CIPHERTEXT_LENGTH);
#else
		h3tmp = rinfo->h3tmp;
		h1tmp = rinfo->h1tmp;
#endif
		memcpy(&h1tmp[tmp], saved_plain[index], len);

		/* h1 = MD5(user:realm:password) */
		MD5_Init(&ctx);
		MD5_Update(&ctx, h1tmp, len + tmp);
		MD5_Final((unsigned char*)h1, &ctx);
		bin2ascii(conv, h1);

		/* response = MD5(hex(h1):nonce:nc:cnonce:qop:h2) */
		memcpy(h3tmp, conv, CIPHERTEXT_LENGTH);
		MD5_Init(&ctx);
		MD5_Update(&ctx, h3tmp, rinfo->h3tmplen);
		MD5_Final(crypt_key[index], &ctx);
	}
#endif
	return count;
}

/* Bounded strdup via mem_alloc.
   NOTE(review): the scan condition "s[tmp] != 0 && tmp <= n" can read one
   byte past index n before stopping — harmless for NUL-terminated input,
   but worth confirming. */
static char *mystrndup(const char *s, size_t n)
{
	size_t tmp;
	size_t size;
	char *ret;

	for (tmp = 0; s[tmp] != 0 && tmp <= n; tmp++);
	size = n;
	if (tmp < size)
		size = tmp;
	if ((ret = mem_alloc(sizeof(char) * size + 1)) == NULL)
		return NULL;
	memmove(ret, s, size);
	ret[size] = 0;
	return ret;
}

/* Length of str up to (not including) the next '$' or NUL. */
static size_t reqlen(char *str)
{
	size_t len;

	for (len = 0; str[len] != 0 && str[len] != SEPARATOR; len++);
	return len;
}

/* Parse the '$'-separated fields, precompute h2 = MD5(method:uri), and
   build the fixed h1/h3 fragments stored in the salt (see reqinfo_t). */
static void *get_salt(char *ciphertext)
{
	int nb;
	int i;
	char *request[SIZE_TAB];
	char *str;
	static reqinfo_t *r;
#ifdef __MMX__
	__m64 h2[BINARY_SIZE / sizeof(__m64)];
	__m64 conv[CIPHERTEXT_LENGTH / sizeof(__m64) + 1];
#else
	unsigned int h2[BINARY_SIZE / sizeof(unsigned int)];
	uint32_t conv[(CIPHERTEXT_LENGTH / sizeof(uint32_t)) + 1];
#endif
	MD5_CTX ctx;

	/* parse the password string */
	if (!r)
		r = mem_alloc_tiny(sizeof(*r), MEM_ALIGN_WORD);
	memset(r, 0, sizeof(*r));
	for (nb = 0, i = 1; ciphertext[i] != 0; i++) {
		if (ciphertext[i] == SEPARATOR) {
			i++;
			request[nb] = mystrndup(&ciphertext[i], reqlen(&ciphertext[i]));
			nb++;
		}
	}
	while (nb < SIZE_TAB) {
		request[nb++] = NULL;
	}

	/* calculate h2 (h2 = md5(method:digestURI))*/
	str = mem_alloc(strlen(request[R_METHOD]) + strlen(request[R_URI]) + 2);
	sprintf(str, "%s:%s", request[R_METHOD], request[R_URI]);
	MD5_Init(&ctx);
	MD5_Update(&ctx, str, strlen(str));
	MD5_Final((unsigned char*)h2, &ctx);

	memset(conv, 0, CIPHERTEXT_LENGTH + 1);
	bin2ascii(conv, h2);
	MEM_FREE(str);

	/* create a part of h1 (h1tmp = request:realm:)*/
	snprintf(r->h1tmp, HTMP - PLAINTEXT_LENGTH, "%s:%s:", request[R_USER], request[R_REALM]);
	/* create a part of h3 (h3tmp = nonce:noncecount:clientnonce:qop:h2)*/
	snprintf(&r->h3tmp[CIPHERTEXT_LENGTH], HTMP - CIPHERTEXT_LENGTH, ":%s:%s:%s:%s:%s", request[R_NONCE], request[R_NONCECOUNT], request[R_CLIENTNONCE], request[R_QOP], (char*)conv);

	r->h1tmplen = strlen(r->h1tmp);
	r->h3tmplen = strlen(&r->h3tmp[CIPHERTEXT_LENGTH]) + CIPHERTEXT_LENGTH;

	for (nb=0; nb < SIZE_TAB; ++nb) {
		MEM_FREE(request[nb]);
	}
	return r;
}

/* convert response to binary form */
static void *get_binary(char *ciphertext)
{
	static unsigned int realcipher[BINARY_SIZE / sizeof(int)];
	int i;

	ciphertext += 10;	/* skip "$response$" */
	for (i = 0; i < BINARY_SIZE; i++) {
		((unsigned char*)realcipher)[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
	}
	return (void*) realcipher;
}

/* Partial-hash lookups over the first digest word, masked per table size. */
#ifdef SIMD_COEF_32
#define HASH_OFFSET (index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(ARCH_WORD_32*)&crypt_key[index] & PH_MASK_6; }
#endif

/* Format descriptor registered with the john core. */
struct fmt_main fmt_HDAA = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
Example_SIMD.6.c
/*
 * @@name: SIMD.6c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 * @@version: omp_4.0
 */

/* SIMD-enabled helper: bumps *p by 10 and returns the new value.
 * notinbranch promises every call site invokes it unconditionally. */
#pragma omp declare simd linear(p:1) notinbranch
int foo(int *p)
{
	*p += 10;
	return *p;
}

/* Adds 10 to every b[k] through foo() and mirrors the result into a[];
 * returns the last stored element. */
int myaddint(int *a, int *b, int n)
{
	int k;

#pragma omp simd
	for (k = 0; k < n; k++)
		a[k] = foo(&b[k]); /* unconditional call — matches notinbranch */
	return a[n - 1];
}

/* SIMD-enabled helper: bumps *p by 18.5f and returns the new value.
 * inbranch promises it is only ever called under a condition. */
#pragma omp declare simd linear(p:1) inbranch
float goo(float *p)
{
	*p += 18.5f;
	return *p;
}

/* Replaces each x[k] with goo(&y[k]) when x[k] > y[k], otherwise with
 * y[k]; returns the last element (truncated to int, as before). */
int myaddfloat(float *x, float *y, int n)
{
	int k;

#pragma omp simd
	for (k = 0; k < n; k++) {
		/* goo only runs when the condition holds — matches inbranch */
		x[k] = (x[k] > y[k]) ? goo(&y[k]) : y[k];
	}
	return x[n - 1];
}
reduce3.h
/* * reduce3.h * * Created on: Dec 28, 2015 * Author: agibsonccc */ #ifndef REDUCE3_H_ #define REDUCE3_H_ #define EXTRA_PARAMS_LENGTH 10 #include <op.h> #include <templatemath.h> #include <helper_cuda.h> #include <sharedmem.h> #include <omp.h> #include <pairwise_util.h> #include <dll.h> #include <shape.h> #ifdef __JNI__ #include <jni.h> #endif #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #endif namespace functions { namespace reduce3 { /** * Reduce involving * 2 arrays */ template<typename T> class Reduce3: public virtual functions::ops::Op<T> { public: virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) = 0; virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif T startingValue(T *input) = 0; virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif T * generateExtraParams() = 0; virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif void finalizeExtraParams(T **extraParamsRef) = 0; /** * * @param d1 * @param d2 * @param extraParams * @return */ //an op for the kernel virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T op(T d1, T d2, T **extraParamsRef) = 0; //calculate an update of the reduce operation /** * * @param old * @param opOutput * @param extraParams * @return */ virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T update(T old, T opOutput, T **extraParamsRef) = 0; /** * * @param old * @param opOutput * @param extraParams * @return */ virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T merge(T old, T opOutput, T **extraParamsRef) = 0; /** * * @param d1 * @param d2 * @param extraParams * @return */ //an op for the kernel #ifdef __CUDACC__ virtual __device__ inline T opAtomic(T d1, T d2, T **extraParamsRef) = 0; #endif #ifdef __CUDACC__ /** * Aggregate shared memory * @param sPartialsRef * @param tid * @param extraParams */ virtual __inline__ __device__ void aggregatePartials(T **sPartialsRef, int tid, int numItems, T 
**extraParamsRef) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. T *sPartials = *sPartialsRef; int floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { sPartials[tid - floorPow2] = update(sPartials[tid - floorPow2], sPartials[tid], extraParamsRef); } __syncthreads(); } for (int activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads) { sPartials[tid] = update(sPartials[tid], sPartials[tid + activeThreads], extraParamsRef); } __syncthreads(); } } /** Perform a reduction @param n the number of elements @param xOffset the starting offset @param dx the data to perform the reduction on @param incx the increment on which to perform the reduction @param extraParams extra parameters used for calculations @param result where to store the result of the reduction */ virtual __inline__ __device__ void transformNoElementWiseStride( T *dx, int *xShapeInfo, T *dy, int *yShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { Nd4jIndex n = shape::length(xShapeInfo); int rank = shape::rank(xShapeInfo); //shared memory space for storing intermediate results //SharedMemory <T> val; T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer(); T startingVal = this->startingValue(dx); sPartials[threadIdx.x] = startingVal; int idx[MAX_RANK]; #pragma unroll for(Nd4jIndex i = blockIdx.x * gridDim.x + threadIdx.x;i < n; i += gridDim.x * blockDim.x) { shape::ind2subC(rank,shape::shapeOf(xShapeInfo),i, idx); Nd4jIndex offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),idx,rank); Nd4jIndex yOffset = shape::getOffset(0,shape::shapeOf(yShapeInfo),shape::stride(yShapeInfo),idx,rank); 
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], this->opAtomic(dx[offset], dy[yOffset], &extraParams),&extraParams); } T **sPartialsRef = (T **) &sPartials; aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, n), &extraParams); /** * Look at something that uses the extra params * and aggregates the extra values propelry. *This will be used in summary stats too. */ // write result for this block to global mem if (threadIdx.x == 0) { if (postProcessOrNot) { result[blockIdx.x] = postProcess(sPartials[0], n,&extraParams); } else { result[blockIdx.x] = sPartials[0]; } } if(threadIdx.x == 0 && this->extraParamsLength() > 0) this->finalizeExtraParams(&extraParams); } /** * */ virtual __inline__ __device__ void execScalarCuda( T *dx, int *xShapeInfo, T *dy, int *yShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *allocationBuffer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { // SharedMemory <T> val; T *sPartials = (T *) manager->getSharedReductionBuffer(); // val.getPointer(); T startingVal = this->startingValue(dx); Nd4jIndex length = shape::length(xShapeInfo); int xElementWiseStride = shape::elementWiseStride(xShapeInfo); int yElementWiseStride = shape::elementWiseStride(yShapeInfo); int tid = blockIdx.x * blockDim.x + threadIdx.x; char xOrder = shape::order(xShapeInfo); char yOrder = shape::order(yShapeInfo); if(xOrder == yOrder) { if (xElementWiseStride == 1 && yElementWiseStride == 1) { for(Nd4jIndex i = threadIdx.x; i < length; i+= gridDim.x * blockDim.x) { startingVal = update(startingVal, this->opAtomic(dx[i], dy[i], &extraParams), &extraParams); } } else { for(int i = threadIdx.x; i < length; i+= gridDim.x * blockDim.x) { startingVal = update(startingVal, this->opAtomic(dx[i * xElementWiseStride], dy[i * yElementWiseStride], &extraParams), &extraParams); } } sPartials[tid] = startingVal; __syncthreads(); T **sPartialsRef = (T **) &sPartials; aggregatePartials(sPartialsRef, tid, 
nd4j::math::nd4j_min<int>(blockDim.x, length), &extraParams); /** * Look at something that uses the extra params * and aggregates the extra values properly. *This will be used in summary stats too. */ // write result for this block to global mem __syncthreads(); if (tid == 0) { result[0] = postProcess(sPartials[0], length,&extraParams); } } else { int *xShape = shape::shapeOf(xShapeInfo); int *xStride = shape::stride(xShapeInfo); int *yStride = shape::stride(yShapeInfo); T startingVal = this->startingValue(dx); int n = shape::length(xShapeInfo); //SharedMemory <T> val; T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer(); Nd4jIndex length = shape::length(xShapeInfo); int xElementWiseStride = shape::elementWiseStride(xShapeInfo); int yElementWiseStride = shape::elementWiseStride(yShapeInfo); char xOrder = shape::order(xShapeInfo); char yOrder = shape::order(yShapeInfo); //int *idx = (int *) malloc(sizeof(int) * shape::rank(xShapeInfo)); int rank = shape::rank(xShapeInfo); /* long allocSize = sizeof(int) * rank; int *idx = shape::cuMalloc(allocationBuffer, allocSize, manager); */ int idx[MAX_RANK]; //shared memory space for storing intermediate results sPartials[threadIdx.x] = startingVal; #pragma unroll for(unsigned int i = tid ;i < n; i += gridDim.x * blockDim.x) { shape::ind2sub(rank,shape::shapeOf(xShapeInfo),i,idx); Nd4jIndex offset = shape::getOffset(0,shape::shapeOf(xShapeInfo),shape::stride(xShapeInfo),idx,rank); Nd4jIndex yOffset = shape::getOffset(0,shape::shapeOf(yShapeInfo),shape::stride(yShapeInfo),idx,rank); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], this->opAtomic(dx[offset], dy[yOffset], &extraParams),&extraParams); } /* if (rank > MAX_COORD && tid * allocSize > PREALLOC_SIZE - allocSize) { free(idx); } */ T **sPartialsRef = (T **) &sPartials; aggregatePartials(sPartialsRef, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, length), &extraParams); /** * Look at something that uses the extra params * and aggregates 
the extra values propelry. *This will be used in summary stats too. */ // write result for this block to global mem __syncthreads(); if (tid == 0) { result[tid] = postProcess(sPartials[0], n,&extraParams); } } } /** Perform a reduction @param n the number of elements @param xOffset the starting offset @param dx the data to perform the reduction on @param incx the increment on which to perform the reduction @param extraParams extra parameters used for calculations @param result where to store the result of the reduction */ virtual __inline__ __device__ void transform( T *dx, int *xShapeInfo, T *dy, int *yShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo) { /** * Gpu information for the problem */ int tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ int resultScalar; __shared__ int xElementWiseStride; __shared__ int yElementWiseStride; //shared memory space for storing intermediate results //SharedMemory <T> val; T *sPartials = (T *) manager->getSharedReductionBuffer(); //val.getPointer(); T init = this->startingValue(dx); sPartials[threadIdx.x] = init; //length for the tad __shared__ Nd4jIndex resultLength; T reduction = this->startingValue(dx); if (threadIdx.x == 0) { if (resultShapeInfo != nullptr) resultLength = shape::length(resultShapeInfo); else resultLength = 1; if (dimensionLength == 1) { if (dimension == nullptr || dimension[0] == MAX_DIMENSION) resultScalar = 1; else resultScalar = 0; } else resultScalar = 0; if (resultLength == 1) resultScalar = 1; /** * The element wise stride belong longs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along long arr * we can use arr.stride(1) as a representation * along long which to iterate. 
*/ int *xStride = shape::stride(xShapeInfo); char xOrder = shape::order(xShapeInfo); xElementWiseStride = shape::elementWiseStride(xShapeInfo); yElementWiseStride = shape::elementWiseStride(yShapeInfo); //printf("Order is: [%c], stride is: xElementStride: [%i], passed strides are: [%i], dimension: [%i], dimensionLength: [%i]\n", xOrder, xElementWiseStride, xStride[0], dimension[0], dimensionLength); } __syncthreads(); if (!resultScalar) { __shared__ shape::TAD *tad; if (threadIdx.x == 0) { tad = new(manager->getTADSpace()) shape::TAD(); //(xShapeInfo,dimension,dimensionLength) tad->setExternalBuffers((void *) manager); tad->initWithExternalTAD(tadOnlyShapeInfo, xShapeInfo, dimension, dimensionLength); //tad->init(xShapeInfo,dimension,dimensionLength); //tad->createTadOnlyShapeInfo(); } __syncthreads(); if(dimensionLength > 1) { //decompose in to several sub tads after //moving all dimensions (in sorted order) //to the back. //permuted version of the x shape info for setting up the tad problem //decompose in to several sub tads after //moving all dimensions (in sorted order) //to the back. 
//permuted version of the x shape info for setting up the tad problem int *xShape = shape::shapeOf(tad->tadOnlyShapeInfo); int *xStride = shape::stride(tad->tadOnlyShapeInfo); Nd4jIndex tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); int rank = shape::rank(tad->tadOnlyShapeInfo); #pragma unroll for(Nd4jIndex i = tid; i < resultLength; i+= gridDim.x * blockDim.x) { int offset = tad->tadOffset(i); int shapeIter[MAX_RANK]; int coord[MAX_RANK]; int dim; int xStridesIter[MAX_RANK]; int yStridesIter[MAX_RANK]; T *xPointer = dx + offset; T start = this->startingValue(xPointer); int *xShape = shape::shapeOf(xShapeInfo); int *xStride = shape::stride(xShapeInfo); int *yStride = shape::stride(yShapeInfo); T startingVal = this->startingValue(dx); Nd4jIndex n = shape::length(xShapeInfo); int rank = shape::rank(xShapeInfo); if(PrepareTwoRawArrayIter<T>(rank, xShape, dx, xStride, dy, yStride, &rank, shapeIter, &dx, xStridesIter, &dy, yStridesIter) >= 0) { ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { /* Process the innermost dimension */ T *xIter = dx; T *yIter = dy; startingVal = update(startingVal, op(xIter[0],yIter[0],&extraParams),&extraParams); } ND4J_RAW_ITER_TWO_NEXT(dim, rank, coord, shapeIter, dx, xStridesIter, dy, yStridesIter); result[i] = postProcess(startingVal,n,&extraParams); } else { printf("Unable to prepare array\n"); } } __syncthreads(); } else { /** * The element wise stride belong longs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along long arr * we can use arr.stride(1) as a representation * along long which to iterate. 
*/
                Nd4jIndex xLength = shape::length(xShapeInfo);
                // Length of each tensor-along-dimension, derived from the ratio
                // of full length to number of results.
                Nd4jIndex tadLength = xLength / resultLength;
                Nd4jIndex i = 0,j = 0;

                /* NOTE(review): a dead, commented-out per-block TAD
                   implementation (createOffsetForBlock + shared-memory
                   aggregation over tad->tadLength) used to live here;
                   removed for readability. */

                for(i = tid; i < resultLength; i+= blockDim.x * gridDim.x) {
                    int xOffsetForTad = tad->tadOffset(i);
                    // NOTE(review): y reuses the x TAD offset -- this silently
                    // assumes dx and dy share an identical TAD layout; confirm
                    // against callers before relying on it.
                    int yOffsetForTad = xOffsetForTad;//tad->tadOffset(i);
                    // NOTE(review): sPartials points at per-block __shared__
                    // memory (manager->getSharedReductionBuffer()), but it is
                    // seeded at index `tid` (the GLOBAL thread id, see the top
                    // of transform()) and accumulated at index `i`, which grows
                    // past blockDim.x. Whenever gridDim.x > 1, or on any later
                    // loop trip, this reads/writes out of bounds, and the seed
                    // slot (sPartials[tid]) never matches the accumulation slot
                    // (sPartials[i]). Both should almost certainly be
                    // threadIdx.x -- verify and fix upstream.
                    sPartials[tid] = op(dx[xOffsetForTad],dy[yOffsetForTad], &extraParams);
                    for(j = 1; j < tadLength; j++) {
                        sPartials[i] = update(sPartials[i],op(dx[xOffsetForTad + xElementWiseStride * j],dy[yOffsetForTad + yElementWiseStride * j], &extraParams), &extraParams);
                    }
                    result[i] = postProcess(sPartials[i],tadLength,&extraParams);
                }
            }
        }
        else {
            // Scalar path is currently stubbed out: it only logs, and the
            // delegation to execScalarCuda below is commented out.
            printf("shifting to execScalarCuda\n");
            /*
            this->execScalarCuda(
                dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo,
allocationPointer, manager, tadOnlyShapeInfo); */ } } #endif /** * * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ #ifdef __CUDACC__ __host__ #endif T execScalar( T *x, int *xShapeInfo, T *extraParamsVals, T *y, int *yShapeInfo) { T startingVal = this->startingValue(x); Nd4jIndex length = shape::length(xShapeInfo); int xElementWiseStride = shape::elementWiseStride(xShapeInfo); int yElementWiseStride = shape::elementWiseStride(yShapeInfo); #pragma omp parallel for simd for(int i = 0; i < this->extraParamsLength();i++) { extraParamsVals[i] = startingVal; } char xOrder = shape::order(xShapeInfo); char yOrder = shape::order(yShapeInfo); if(xOrder == yOrder) { if (xElementWiseStride == 1 && yElementWiseStride == 1) { #pragma omp simd for(int i = 0; i < length; i++) { startingVal = update(startingVal,op(x[i],y[i],&extraParamsVals),&extraParamsVals); } return postProcess(startingVal, length,&(extraParamsVals)); } else { #pragma omp simd for(Nd4jIndex i = 0; i < length; i++) { startingVal = update(startingVal,op(x[i * xElementWiseStride],y[i * yElementWiseStride],&extraParamsVals),&extraParamsVals); } return postProcess(startingVal, length,&(extraParamsVals)); } } else { int *xShape = shape::shapeOf(xShapeInfo); int *xStride = shape::stride(xShapeInfo); int *yStride = shape::stride(yShapeInfo); T startingVal = this->startingValue(x); Nd4jIndex n = shape::length(xShapeInfo); int shapeIter[MAX_RANK]; int coord[MAX_RANK]; int dim; int xStridesIter[MAX_RANK]; int yStridesIter[MAX_RANK]; int rank = shape::rank(xShapeInfo); if(PrepareTwoRawArrayIter<T>(rank, xShape, x, xStride, y, yStride, &rank, shapeIter, &x, xStridesIter, &y, yStridesIter) >= 0) { ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { /* Process the innermost dimension */ T *xIter = x; T *yIter = y; startingVal = update(startingVal, op(xIter[0],yIter[0],&extraParamsVals),&extraParamsVals); } ND4J_RAW_ITER_TWO_NEXT(dim, rank, 
coord, shapeIter, x, xStridesIter, y, yStridesIter); return postProcess(startingVal,n,&extraParamsVals); } else { printf("Unable to prepare array\n"); } } return startingVal; } /** * * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ #ifdef __CUDACC__ __host__ #endif void execScalar( T *x, int *xShapeInfo, T *extraParamsVals, T *y, int *yShapeInfo, T *result, int *resultShapeIfo) { result[0] = execScalar(x,xShapeInfo,extraParamsVals,y,yShapeInfo); } /** * * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ #ifdef __CUDACC__ __host__ #endif void exec( T *x, int *xShapeInfo, T *extraParamsVals, T *y, int *yShapeInfo, T *result, int *resultShapeInfo) { execScalar( x, xShapeInfo, extraParamsVals, y, yShapeInfo, result, resultShapeInfo); } /** * * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfoBuffer * @param dimension * @param dimensionLength */ void exec(T *x, int *xShapeInfo, T *extraParamsVals, T *y, int *yShapeInfo, T *result, int *resultShapeInfoBuffer, int *dimension, int dimensionLength) { if(shape::isScalar(resultShapeInfoBuffer)) { execScalar( x, xShapeInfo, extraParamsVals, y, yShapeInfo, result, resultShapeInfoBuffer); return; } char xOrder = shape::order(xShapeInfo); char yOrder = shape::order(yShapeInfo); if(xOrder != yOrder) { int shapeIter[MAX_RANK]; int coord[MAX_RANK]; int dim; int xStridesIter[MAX_RANK]; int yStridesIter[MAX_RANK]; int *xShape = shape::shapeOf(xShapeInfo); int *xStride = shape::stride(xShapeInfo); int *yStride = shape::stride(yShapeInfo); int rank = shape::rank(xShapeInfo); if(PrepareTwoRawArrayIter<T>(rank, xShape, x, xStride, y, yStride, &rank, shapeIter, &x, xStridesIter, &y, yStridesIter) >= 0) { Nd4jIndex resultLength = shape::length(resultShapeInfoBuffer); Nd4jIndex tadLength = 
shape::tadLength(xShapeInfo,dimension,dimensionLength); ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); { /* Process the innermost dimension */ T *xIter = x; T *yIter = y; Nd4jIndex xOffset = shape::getOffset(0,xShape,xStride,coord,rank); int reductionIndex = xOffset / resultLength; result[reductionIndex] = update(result[reductionIndex],op(xIter[0],yIter[0],&extraParamsVals),&extraParamsVals); } ND4J_RAW_ITER_TWO_NEXT(dim, rank, coord, shapeIter, x, xStridesIter, y, yStridesIter); #pragma omp parallel for for(Nd4jIndex i = 0; i < resultLength ;i++) { result[i] = postProcess(result[i],tadLength,&extraParamsVals); } } else { printf("Unable to prepare array\n"); } } else { T startingVal = this->startingValue(x); Nd4jIndex resultLength = shape::length(resultShapeInfoBuffer); shape::TAD xTad(yShapeInfo,dimension,dimensionLength); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); /** * The element wise stride belong longs to a reduction index. * When used out of order, we can get rid of the data * dependencies and rely on using the max dimension * specified for stride instead. * Say we take the sum(0,1) along long arr * we can use arr.stride(1) as a representation * along long which to iterate. 
*/ int tadElementWiseStride = shape::elementWiseStride(xTad.tadOnlyShapeInfo); int tadLength = shape::length(xTad.tadOnlyShapeInfo); #pragma omp parallel for for(Nd4jIndex i = 0; i < resultLength; i++) { T *localExtraParams = nullptr; if(this->extraParamsLength() > 0) localExtraParams = new T[this->extraParamsLength()]; for(int extraParamsIdx = 0; extraParamsIdx < this->extraParamsLength(); extraParamsIdx++) { localExtraParams[extraParamsIdx] = startingVal; } Nd4jIndex offset = xTad.tadOffsets[i]; result[i] = op(x[offset], y[offset],&localExtraParams); for(int j = 1; j < tadLength; j++) { result[i] = update(result[i],op(x[offset + tadElementWiseStride * j],y[offset + tadElementWiseStride * j], &localExtraParams), &localExtraParams); } result[i] = postProcess(result[i],tadLength,&localExtraParams); if(localExtraParams != nullptr) delete[] localExtraParams; } } } #ifdef __CUDACC__ __host__ __device__ #endif virtual ~Reduce3() { } #ifdef __CUDACC__ __host__ __device__ #endif Reduce3() { } }; namespace ops { /** * Cosine similarity between 2 * arrays */ template<typename T> class CosineSimilarity: public virtual Reduce3<T> { public: virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif T * generateExtraParams() { T *extraParams = new T[2]; return extraParams; } virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif void finalizeExtraParams(T **extraParams) { delete[] *extraParams; } virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif T startingValue(T *input) { return 0.0; } #ifdef __CUDACC__ __host__ __device__ #endif inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) { T *extraParams = *extraParamsRef; return reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1])); } /** * * @param d1 * @param d2 * @param extraParams * @return */ //an op for the kernel virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T op(T d1, T d2, T **extraParamsRef) { T *extraParams = 
*extraParamsRef; extraParams[0] += d1 * d1; extraParams[1] += d2 * d2; return (d1 * d2); } #ifdef __CUDACC__ __host__ __device__ #endif void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) { T *extraParamsTotalRef = *extraParamsTotal; T *extraParamsLocalRef = *extraParamsLocal; extraParamsTotalRef[0] += extraParamsLocalRef[0]; extraParamsTotalRef[1] += extraParamsLocalRef[1]; } /** * * @param d1 * @param d2 * @param extraParams * @return */ //an op for the kernel #ifdef __CUDACC__ virtual __device__ inline T opAtomic(T d1, T d2, T **extraParamsRef) { T *extraParams = *extraParamsRef; nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],d1 * d1); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],d2 * d2); return (d1 * d2); } #endif //calculate an update of the reduce operation /** * * @param old * @param opOutput * @param extraParams * @return */ virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T update(T old, T opOutput, T **extraParamsRef) { return old + opOutput; } /** * * @param old * @param opOutput * @param extraParams * @return */ virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T merge(T old, T opOutput, T **extraParamsRef) { return update(old, opOutput, extraParamsRef); } #ifdef __CUDACC__ __host__ __device__ #endif virtual ~CosineSimilarity() { } #ifdef __CUDACC__ __host__ __device__ #endif CosineSimilarity() { this->extraParamsLen = 2; } }; /** * Dot product between 2 arrays */ template<typename T> class Dot: public virtual Reduce3<T> { public: virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif T * generateExtraParams() { return nullptr; } virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif void finalizeExtraParams(T **extraParamsRef) { //no-op delete[] *extraParamsRef; } virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif T startingValue(T *input) { return 0.0; } #ifdef __CUDACC__ __host__ __device__ #endif inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) { 
return reduction;
    }

    /**
     * Pairwise op for dot product: the product of the two inputs
     * (summed up by update()).
     * @param d1 element from the first array
     * @param d2 element from the second array
     * @param extraParamsRef unused for dot product
     * @return d1 * d2
     */
    //an op for the kernel
    virtual
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline T op(T d1, T d2, T **extraParamsRef) {
        return d1 * d2;
    }

    /**
     * Device-side atomic variant of op(); dot product keeps no extra
     * accumulators, so it simply delegates to op().
     */
    //an op for the kernel
#ifdef __CUDACC__
    virtual __device__
    inline T opAtomic(T d1, T d2, T **extraParamsRef) {
        return op(d1,d2,extraParamsRef);
    }
#endif

    //calculate an update of the reduce operation
    /**
     * Combine a previous partial result with a new op() output (plain sum).
     * @param old the running partial reduction
     * @param opOutput the latest op() result
     * @param extraParamsRef unused
     * @return opOutput + old
     */
    virtual
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline T update(T old, T opOutput, T **extraParamsRef) {
        return opOutput + old;
    }

    /**
     * Merge two partial reductions (e.g. from different blocks); identical
     * to update() for a dot product.
     * @param old one partial reduction
     * @param opOutput the other partial reduction
     * @param extraParamsRef unused
     */
    virtual
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline T merge(T old, T opOutput, T **extraParamsRef) {
        return update(old, opOutput, extraParamsRef);
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) {
        //no extra params aggregation needs to happen
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    virtual ~Dot() {
    }

#ifdef __CUDACC__
    __host__ __device__
#endif
    Dot() {
    }
};

/**
 * Euclidean distance between 2 arrays:
 * sqrt(sum_i (x[i] - y[i])^2).
 */
template<typename T>
class EuclideanDistance: public virtual Reduce3<T> {
public:
    // Euclidean distance needs no extra accumulators.
    virtual
#ifdef __CUDACC__
    __inline__ __host__ __device__
#endif
    T * generateExtraParams() {
        return nullptr;
    }

    // NOTE(review): generateExtraParams() returns nullptr, so this delete[]
    // is a no-op in practice; if a caller ever supplies its own buffer,
    // ownership should be confirmed before relying on this free.
    virtual
#ifdef __CUDACC__
    __inline__ __host__ __device__
#endif
    void finalizeExtraParams(T **extraParamsRef) {
        //no-op
        delete[] *extraParamsRef;
    }

    // The reduction accumulator starts from zero.
    virtual
#ifdef __CUDACC__
    __inline__ __host__ __device__
#endif
    T startingValue(T *input) {
        return 0.0;
    }

    // Final step: square root of the accumulated sum of squared differences.
#ifdef __CUDACC__
    __host__ __device__
#endif
    inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) {
        return nd4j::math::nd4j_sqrt<T>(reduction);
    }

    /**
     * Pairwise op: squared difference of the two inputs
     * (body continues on the next line of the file).
     * @param d1 element from the first array
     * @param d2 element from the second array
     * @return (d1 - d2)^2
     */
    //an op for the kernel
    virtual
#ifdef __CUDACC__
    __host__ __device__
#endif
inline T op(T d1, T d2, T **extraParamsRef) { T ret = d1 - d2; return ret * ret; } /** * * @param d1 * @param d2 * @param extraParams * @return */ //an op for the kernel #ifdef __CUDACC__ virtual __device__ inline T opAtomic(T d1, T d2, T **extraParamsRef) { return op(d1,d2,extraParamsRef); } #endif //calculate an update of the reduce operation /** * * @param old * @param opOutput * @param extraParams * @return */ virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T update(T old, T opOutput, T **extraParamsRef) { return opOutput + old; } /** * * @param old * @param opOutput * @param extraParams * @return */ virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T merge(T old, T opOutput, T **extraParamsRef) { return update(old, opOutput, extraParamsRef); } #ifdef __CUDACC__ __host__ __device__ #endif void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) { //no extra params aggregation needs to happen } #ifdef __CUDACC__ __host__ __device__ #endif virtual ~EuclideanDistance() { } #ifdef __CUDACC__ __host__ __device__ #endif EuclideanDistance() { } }; /** * Manhattan distance between 2 arrays */ template<typename T> class ManhattanDistance: public virtual Reduce3<T> { public: virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif T * generateExtraParams() { return nullptr; } virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif void finalizeExtraParams(T **extraParamsRef) { //no op delete[] *extraParamsRef; } virtual #ifdef __CUDACC__ __inline__ __host__ __device__ #endif T startingValue(T *input) { return 0.0; } #ifdef __CUDACC__ __host__ __device__ #endif inline T postProcess(T reduction, Nd4jIndex n,T **extraParamsRef) { return reduction; } /** * * @param d1 * @param d2 * @param extraParams * @return */ //an op for the kernel virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T op(T d1, T d2, T **extraParamsRef) { return nd4j::math::nd4j_abs<T>(d1 - d2); } //calculate an update of the reduce operation /** * 
* @param old * @param opOutput * @param extraParams * @return */ virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T update(T old, T opOutput, T **extraParamsRef) { return old + opOutput; } #ifdef __CUDACC__ __host__ __device__ #endif void aggregateExtraParams(T **extraParamsTotal,T **extraParamsLocal) { //no extra params aggregation needs to happen } /** * * @param d1 * @param d2 * @param extraParams * @return */ //an op for the kernel #ifdef __CUDACC__ virtual __device__ inline T opAtomic(T d1, T d2, T **extraParamsRef) { return op(d1,d2,extraParamsRef); } #endif /** * * @param old * @param opOutput * @param extraParams * @return */ virtual #ifdef __CUDACC__ __host__ __device__ #endif inline T merge(T old, T opOutput, T **extraParamsRef) { return update(old, opOutput, extraParamsRef); } #ifdef __CUDACC__ __host__ __device__ #endif virtual ~ManhattanDistance() { } #ifdef __CUDACC__ __host__ __device__ #endif ManhattanDistance() { } }; } template<typename T> class Reduce3OpFactory { public: #ifdef __CUDACC__ __host__ __device__ #endif Reduce3OpFactory() { } /** * Create an op given an op number * @param op the op number * 0: manhattan distance * 1: euclidean distance * 2: cosine similarity * @return */ #ifdef __CUDACC__ __inline__ __device__ Reduce3<T> * getOp(int op, unsigned char *buffer) { #else Reduce3<T> * getOp(int op) { #endif if (op == 0) #ifdef __CUDACC__ return new(buffer) functions::reduce3::ops::ManhattanDistance<T>(); #else return new functions::reduce3::ops::ManhattanDistance<T>(); #endif else if (op == 1) #ifdef __CUDACC__ return new(buffer) functions::reduce3::ops::EuclideanDistance<T>(); #else return new functions::reduce3::ops::EuclideanDistance<T>(); #endif else if (op == 2) #ifdef __CUDACC__ return new(buffer) functions::reduce3::ops::CosineSimilarity<T>(); #else return new functions::reduce3::ops::CosineSimilarity<T>(); #endif else if (op == 3) #ifdef __CUDACC__ return new(buffer) functions::reduce3::ops::Dot<T>(); #else return new 
functions::reduce3::ops::Dot<T>(); #endif return nullptr; } }; } } #ifdef __CUDACC__ template <typename T> __inline__ __device__ void reduce3NoElementWiseStrideGeneric( int opNum, T *dx, int *xShapeInfo, T *dy, int *yShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) { __shared__ functions::reduce3::Reduce3<T> * op; __shared__ functions::reduce3::Reduce3OpFactory<T> *reduce3OpFactory; __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), sizeof(functions::reduce3::Reduce3OpFactory<T>), sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo)); reduce3OpFactory = new(manager->getFactorySpace()) functions::reduce3::Reduce3OpFactory<T>(); op = reduce3OpFactory->getOp(opNum, manager->getFunctionSpace()); } __syncthreads(); op->transformNoElementWiseStride( dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo); } __global__ void reduce3NoElementWiseStrideDouble( int opNum, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) { reduce3NoElementWiseStrideGeneric<double>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, postProcessOrNot, allocationPointer, tadOnlyShapeInfo); } __global__ void reduce3NoElementWiseStrideFloat( int opNum, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) { reduce3NoElementWiseStrideGeneric<float>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, postProcessOrNot, allocationPointer, 
tadOnlyShapeInfo); } /** * The driver api * @param opNum the number * @param n the length of the reduce * @param dx the input data * @param xShapeInfo the shape information * @param dy the pair wise reduce * @param yShapeInfo the shape information for y * @param extraParams the extra parameters in the operation * @param result where to store the result * @param resultShapeInfo the shape information * @param gpuInformation the gpu information * @param dimension the dimension to reduce along long * @param dimensionLength the dimension length * @param postProcessOrNot whether to post */ template <typename T> __device__ void reduce3Generic( int opNum, T *dx, int *xShapeInfo, T *dy, int *yShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) { __shared__ functions::reduce3::Reduce3<T> * op; __shared__ functions::reduce3::Reduce3OpFactory<T> *reduce3OpFactory; __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), sizeof(functions::reduce3::Reduce3OpFactory<T>), sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo)); reduce3OpFactory = new(manager->getFactorySpace()) functions::reduce3::Reduce3OpFactory<T>(); op = reduce3OpFactory->getOp(opNum, manager->getFunctionSpace()); } __syncthreads(); op->transform( dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, manager, tadOnlyShapeInfo); } template <typename T> __device__ void reduce3ScalarGeneric( int opNum, T *dx, int *xShapeInfo, T *dy, int *yShapeInfo, T *extraParams, T *result, int *resultShapeInfo, int *allocationPointer, int *tadOnlyShapeInfo) { __shared__ functions::reduce3::Reduce3<T> * op; __shared__ functions::reduce3::Reduce3OpFactory<T> 
*reduce3OpFactory; __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), sizeof(functions::reduce3::Reduce3OpFactory<T>), sizeof(functions::reduce3::Reduce3<T>), sizeof(shape::TAD), shape::rank(xShapeInfo)); reduce3OpFactory = new(manager->getFactorySpace()) functions::reduce3::Reduce3OpFactory<T>(); op = reduce3OpFactory->getOp(opNum, manager->getFunctionSpace()); } __syncthreads(); op->execScalarCuda( dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, allocationPointer, manager, tadOnlyShapeInfo); } /** * The driver api * @param opNum the number * @param n the length of the reduce * @param dx the input data * @param xShapeInfo the shape information * @param dy the pair wise reduce * @param yShapeInfo the shape information for y * @param extraParams the extra parameters in the operation * @param result where to store the result * @param resultShapeInfo the shape information * @param dimension the dimension to reduce along long * @param dimensionLength the dimension length * @param postProcessOrNot whether to post [ */ extern "C" __global__ void reduce3Double( int opNum, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationPointer, int *tadOnlyShapeInfo) { reduce3Generic<double>( opNum, dx, xShapeInfo, dy, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationPointer, tadOnlyShapeInfo); } /** * The driver api * @param opNum the number * @param n the length of the reduce * @param dx the input data * @param xShapeInfo the shape information * @param dy the pair wise reduce * @param yShapeInfo the shape information for y * @param extraParams the extra parameters in the operation * @param result where to store the 
result
 * @param resultShapeInfo the shape information for the result
 * @param gpuInformation the gpu information
 * @param dimension the dimension(s) to reduce along
 * @param dimensionLength the number of entries in `dimension`
 * @param postProcessOrNot whether to apply the post-processing step
 */
extern "C" __global__ void reduce3Float(
        int opNum,
        float *dx,
        int *xShapeInfo,
        float *dy,
        int *yShapeInfo,
        float *extraParams,
        float *result,
        int *resultShapeInfo,
        int *dimension,
        int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {
    // Thin float-precision launch entry point: forwards to the templated
    // dimension-wise driver.
    reduce3Generic<float>(
            opNum,
            dx,
            xShapeInfo,
            dy,
            yShapeInfo,
            extraParams,
            result,
            resultShapeInfo,
            dimension,
            dimensionLength,
            postProcessOrNot,
            allocationPointer,
            tadOnlyShapeInfo);
}

// Full (scalar) reduction over both inputs, float precision.
// NOTE(review): dimension/dimensionLength/postProcessOrNot are accepted here
// but not forwarded -- reduce3ScalarGeneric always reduces to a single scalar.
extern "C" __global__ void reduce3ScalarFloat(
        int opNum,
        float *dx,
        int *xShapeInfo,
        float *dy,
        int *yShapeInfo,
        float *extraParams,
        float *result,
        int *resultShapeInfo,
        int *dimension,
        int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {
    reduce3ScalarGeneric<float>(
            opNum,
            dx,
            xShapeInfo,
            dy,
            yShapeInfo,
            extraParams,
            result,
            resultShapeInfo,
            allocationPointer,
            tadOnlyShapeInfo);
}

// Full (scalar) reduction over both inputs, double precision.
// Same unused trailing parameters as reduce3ScalarFloat above.
extern "C" __global__ void reduce3ScalarDouble(
        int opNum,
        double *dx,
        int *xShapeInfo,
        double *dy,
        int *yShapeInfo,
        double *extraParams,
        double *result,
        int *resultShapeInfo,
        int *dimension,
        int dimensionLength,
        int postProcessOrNot,
        int *allocationPointer,
        int *tadOnlyShapeInfo) {
    reduce3ScalarGeneric<double>(
            opNum,
            dx,
            xShapeInfo,
            dy,
            yShapeInfo,
            extraParams,
            result,
            resultShapeInfo,
            allocationPointer,
            tadOnlyShapeInfo);
}

#endif

#endif /* REDUCE3_H_ */
/* ==== begin next source file: domain_helpers.c ==== */
// // Created by sachetto on 19/10/17. // #include "domain_helpers.h" #include "../libraries_common/common_data_structures.h" #include "../utils/file_utils.h" #include "../utils/utils.h" #include "../string/sds.h" #include "../config_helpers/config_helpers.h" #include <float.h> #include <math.h> #include <time.h> #include <unistd.h> int calculate_cuboid_side_lengths(real_cpu start_dx, real_cpu start_dy, real_cpu start_dz, real_cpu side_length_x, real_cpu side_length_y, real_cpu side_length_z, real_cpu *real_side_length_x, real_cpu *real_side_length_y, real_cpu *real_side_length_z) { *real_side_length_x = start_dx * 2.0; *real_side_length_y = start_dy * 2.0; *real_side_length_z = start_dz * 2.0; real_cpu nx = side_length_x / start_dx; real_cpu ny = side_length_y / start_dy; real_cpu nz = side_length_z / start_dz; real_cpu proportion_dxdy = fmax(start_dx, start_dy)/fmin(start_dx, start_dy); real_cpu proportion_dxdz = fmax(start_dx, start_dz)/fmin(start_dx, start_dz); real_cpu proportion_dydz = fmax(start_dz, start_dy)/fmin(start_dz, start_dy); bool error = false; if(start_dx > start_dy) { if (side_length_x < side_length_y) { REPORT_ERROR_ON_FUNCTION("Incorrect configuration. If start_dx > start_dy, you need side_length_x > side_length_y"); error = true; } } if(start_dx > start_dz) { if (side_length_x < side_length_z) { REPORT_ERROR_ON_FUNCTION("Incorrect configuration. If start_dx > start_dz, you need side_length_x > side_length_z"); error = true; } } if(start_dy > start_dx) { if (side_length_y < side_length_x) { REPORT_ERROR_ON_FUNCTION("Incorrect configuration. If start_dy > start_dx, you need side_length_y > side_length_x"); error = true; } } if(start_dy > start_dz) { if (side_length_y < side_length_z) { REPORT_ERROR_ON_FUNCTION("Incorrect configuration. If start_dy > start_dz, you need side_length_y > side_length_z"); error = true; } } if(start_dz > start_dx) { if (side_length_z < side_length_x) { REPORT_ERROR_ON_FUNCTION("Incorrect configuration. 
If start_dz > start_dx, you need side_length_z > side_length_x"); error = true; } } if(start_dz > start_dy) { if (side_length_z < side_length_y) { REPORT_ERROR_ON_FUNCTION("Incorrect configuration. If start_dz > start_dy, you need side_length_z > side_length_y"); error = true; } } if(ceil(proportion_dxdy) != proportion_dxdy || ceil(proportion_dxdz) != proportion_dxdz || ceil(proportion_dydz) != proportion_dydz) { REPORT_ERROR_ON_FUNCTION("Incorrect configuration. start_dx, start_dy and start_dz need to be multiples"); error = true; } if(ceil(nx) != nx) { sds error_str = sdscatprintf(sdsempty(), "start_dx: %lf is not multiple of side_length_x: %lf", start_dx, side_length_x); REPORT_ERROR_ON_FUNCTION(error_str); sdsfree(error_str); error = true; } if(ceil(ny) != ny) { sds error_str = sdscatprintf(sdsempty(), "start_dy: %lf is not multiple of side_length_y: %lf", start_dy, side_length_y); REPORT_ERROR_ON_FUNCTION(error_str); sdsfree(error_str); error = true; } if(ceil(nz) != nz) { sds error_str = sdscatprintf(sdsempty(), "start_dz: %lf is not multiple of side_length_z: %lf", start_dz, side_length_z); REPORT_ERROR_ON_FUNCTION(error_str); sdsfree(error_str); error = true; } if(error) { return 0; } while(*real_side_length_x < side_length_x) { *real_side_length_x *= 2.0; } while(*real_side_length_y < side_length_y) { *real_side_length_y *= 2.0; } while(*real_side_length_z < side_length_z) { *real_side_length_z *= 2.0; } int proportion_h; if(start_dx > start_dy) { proportion_h = (int)(start_dx / start_dy); if(*real_side_length_y >= *real_side_length_x || (*real_side_length_x / proportion_h) < *real_side_length_y) { *real_side_length_x = *real_side_length_y * proportion_h; } else { *real_side_length_y = *real_side_length_x / proportion_h; *real_side_length_z = *real_side_length_z / proportion_h; } } else if(start_dx < start_dy) { proportion_h = (int)(start_dy / start_dx); if(*real_side_length_x >= *real_side_length_y) { *real_side_length_y = *real_side_length_x * 
proportion_h; *real_side_length_z = *real_side_length_z * proportion_h; } else { *real_side_length_x = *real_side_length_y / proportion_h; } } if(start_dy > start_dz) { proportion_h = (int)(start_dy / start_dz); if(*real_side_length_z >= *real_side_length_y || *real_side_length_y / proportion_h < *real_side_length_z) { *real_side_length_y = *real_side_length_z * proportion_h; *real_side_length_x = *real_side_length_x * proportion_h; } else { *real_side_length_z = *real_side_length_y / proportion_h; } } else if(start_dy < start_dz) { proportion_h = (int)(start_dz / start_dy); if(*real_side_length_y > *real_side_length_z || *real_side_length_z / proportion_h < *real_side_length_y) { *real_side_length_z = *real_side_length_y * proportion_h; } else { *real_side_length_y = *real_side_length_z / proportion_h; *real_side_length_x = *real_side_length_x / proportion_h; } } if(start_dx == start_dy) { real_cpu aux = fmax(*real_side_length_x, *real_side_length_y); *real_side_length_x = aux; *real_side_length_y = aux; } if(start_dx == start_dz) { real_cpu aux = fmax(*real_side_length_x, *real_side_length_z); *real_side_length_x = aux; *real_side_length_z = aux; } if(start_dy == start_dz) { real_cpu aux = fmax(*real_side_length_y, *real_side_length_z); *real_side_length_y = aux; *real_side_length_z = aux; } return 1; } void refine_fibrotic_cells(struct grid *the_grid) { assert(the_grid); struct cell_node *grid_cell, *auxiliar_grid_cell; struct fibrotic_mesh_info *mesh_info; grid_cell = the_grid->first_cell; while(grid_cell != 0) { mesh_info = FIBROTIC_INFO(grid_cell); if(grid_cell->active && mesh_info->fibrotic) { auxiliar_grid_cell = grid_cell; grid_cell = grid_cell->next; refine_cell(auxiliar_grid_cell, NULL, NULL); the_grid->number_of_cells += 7; } else { grid_cell = grid_cell->next; } } } void refine_border_zone_cells(struct grid *the_grid) { assert(the_grid); struct cell_node *grid_cell, *auxiliar_grid_cell; struct fibrotic_mesh_info *mesh_info; grid_cell = 
the_grid->first_cell; while(grid_cell != 0) { mesh_info = FIBROTIC_INFO(grid_cell); if(grid_cell->active && mesh_info->border_zone) { auxiliar_grid_cell = grid_cell; grid_cell = grid_cell->next; refine_cell(auxiliar_grid_cell, NULL, NULL); the_grid->number_of_cells += 7; } else { grid_cell = grid_cell->next; } } } /** * Sets the current domain as a domain described in the N-version benchmark * (http://rsta.royalsocietypublishing.org/content/369/1954/4331) * */ void set_benchmark_domain(struct grid *the_grid) { struct cell_node *grid_cell = the_grid->first_cell; while(grid_cell != 0) { grid_cell->active = (grid_cell->center.y < 20000) && (grid_cell->center.x < 7000) && (grid_cell->center.z < 3000); grid_cell = grid_cell->next; } the_grid->mesh_side_length.x = 7000; the_grid->mesh_side_length.y = 20000; the_grid->mesh_side_length.z = 3000; } void set_cuboid_domain(struct grid *the_grid, real_cpu size_x, real_cpu size_y, real_cpu size_z) { struct cell_node *grid_cell = the_grid->first_cell; while(grid_cell != 0) { grid_cell->active = (grid_cell->center.y < size_y) && (grid_cell->center.x < size_x) && (grid_cell->center.z < size_z); grid_cell = grid_cell->next; } the_grid->mesh_side_length.x = size_x; the_grid->mesh_side_length.y = size_y; the_grid->mesh_side_length.z = size_z; } void set_custom_mesh(struct grid *the_grid, const char *file_name, size_t size, char *read_format) { struct cell_node *grid_cell = the_grid->first_cell; FILE *file = fopen(file_name, "r"); if(!file) { print_to_stderr_and_file_and_exit("Error opening mesh described in %s!!\n", file_name); } double **mesh_points = (double **)malloc(sizeof(double *) * size); for(int i = 0; i < size; i++) { mesh_points[i] = (real_cpu *)malloc(sizeof(real_cpu) * 4); if(mesh_points[i] == NULL) { print_to_stderr_and_file_and_exit("Failed to allocate memory\n"); } } real_cpu dummy; // we don't use this value here real_cpu maxy = 0.0; real_cpu maxz = 0.0; real_cpu miny = DBL_MAX; real_cpu minz = DBL_MAX; int *fibrosis 
= (int *)malloc(sizeof(int) * size); char *tag = (char *)malloc(size); for(int k = 0; k < size; k++) { tag[k] = 'n'; } fibrosis[0] = -1; int i = 0; while(i < size) { fscanf(file, read_format, &mesh_points[i][0], &mesh_points[i][1], &mesh_points[i][2], &dummy, &fibrosis[i], &tag[i]); // we save the old index to reference fibrosis[i] and tags[i]. T // this is needed because the array mesh_points is sorted after reading the mesh file. mesh_points[i][3] = i; if(mesh_points[i][1] > maxy) maxy = mesh_points[i][1]; if(mesh_points[i][2] > maxz) maxz = mesh_points[i][2]; if(mesh_points[i][1] < miny) miny = mesh_points[i][1]; if(mesh_points[i][2] < minz) minz = mesh_points[i][2]; i++; } sort_vector(mesh_points, size); // we need to sort because inside_mesh perform a binary search real_cpu maxx = mesh_points[size - 1][0]; real_cpu minx = mesh_points[0][0]; int index; //print_to_stdout_and_file("[grid] minx = %g || maxx = %g || miny = %g || maxy = %g || minz = %g || maxz = %g ||\n",minx,maxx,miny,maxy,minz,maxz); real_cpu x, y, z; while(grid_cell != 0) { x = grid_cell->center.x; y = grid_cell->center.y; z = grid_cell->center.z; //print_to_stdout_and_file("[grid] x = %g || y = %g || z = %g || ",x,y,z); if(x > maxx || y > maxy || z > maxz || x < minx || y < miny || z < minz) { //print_to_stdout_and_file("Out\n"); grid_cell->active = false; } else { //print_to_stdout_and_file("Inside\n"); index = inside_mesh(mesh_points, x, y, z, 0, size - 1); if(index != -1) { grid_cell->active = true; if(fibrosis[0] != -1) { int old_index = (int)mesh_points[index][3]; INITIALIZE_FIBROTIC_INFO(grid_cell); FIBROTIC(grid_cell) = (fibrosis[old_index] == 1); BORDER_ZONE(grid_cell) = (fibrosis[old_index] == 2); SCAR_TYPE(grid_cell) = tag[old_index]; } } else { grid_cell->active = false; } } grid_cell = grid_cell->next; } fclose(file); // deallocate memory for(int l = 0; l < size; l++) { free(mesh_points[l]); } free(mesh_points); free(tag); free(fibrosis); //TODO: we need to sum the cell 
discretization here... the_grid->mesh_side_length.x = maxx; the_grid->mesh_side_length.y = maxy; the_grid->mesh_side_length.z = maxz; } void set_custom_mesh_with_bounds(struct grid *the_grid, const char *file_name, size_t size, real_cpu minx, real_cpu maxx, real_cpu miny, real_cpu maxy, real_cpu minz, real_cpu maxz, bool read_fibrosis) { struct cell_node *grid_cell = the_grid->first_cell; FILE *file = fopen(file_name, "r"); if(!file) { print_to_stderr_and_file_and_exit("Error opening mesh described in %s!!\n", file_name); } real_cpu **mesh_points = (real_cpu **)malloc(sizeof(real_cpu *) * size); for(int i = 0; i < size; i++) { mesh_points[i] = (real_cpu *)calloc(4, sizeof(real_cpu)); if(mesh_points[i] == NULL) { print_to_stderr_and_file_and_exit("Failed to allocate memory\n"); } } real_cpu dummy; // we don't use this value here int *fibrosis = (int *)malloc(sizeof(int) * size); char *tag = (char *)malloc(size); for(int k = 0; k < size; k++) { tag[k] = 'n'; } int i = 0; while(i < size) { fscanf(file, "%lf,%lf,%lf,%lf,%d,%c\n", &mesh_points[i][0], &mesh_points[i][1], &mesh_points[i][2], &dummy, &fibrosis[i], &tag[i]); // we save the old index to reference fibrosis[i] and tags[i]. T // this is needed because the array mesh_points is sorted after reading the mesh file. 
mesh_points[i][3] = i; i++; } sort_vector(mesh_points, size); // we need to sort because inside_mesh perform a binary search int index; real_cpu x, y, z; while(grid_cell != 0) { x = grid_cell->center.x; y = grid_cell->center.y; z = grid_cell->center.z; if(x > maxx || y > maxy || z > maxz || x < minx || y < miny || z < minz) { grid_cell->active = false; } else { index = inside_mesh(mesh_points, x, y, z, 0, size - 1); if(index != -1) { grid_cell->active = true; if(read_fibrosis) { int old_index = (int)mesh_points[index][3]; INITIALIZE_FIBROTIC_INFO(grid_cell); FIBROTIC(grid_cell) = (fibrosis[old_index] == 1); BORDER_ZONE(grid_cell) = (fibrosis[old_index] == 2); SCAR_TYPE(grid_cell) = tag[old_index]; } } else { grid_cell->active = false; } } grid_cell = grid_cell->next; } fclose(file); // deallocate memory for(int l = 0; l < size; l++) { free(mesh_points[l]); } the_grid->mesh_side_length.x = maxx; the_grid->mesh_side_length.y = maxy; the_grid->mesh_side_length.z = maxz; free(mesh_points); free(tag); free(fibrosis); } void set_cell_not_changeable(struct cell_node *c, real_cpu initialDiscretization) { real_cpu P1x, P1y, P1z; real_cpu P2x, P2y, P2z; real_cpu P3x, P3y, P3z; real_cpu P4x, P4y, P4z; real_cpu P5x, P5y, P5z; real_cpu P6x, P6y, P6z; real_cpu P7x, P7y, P7z; real_cpu P8x, P8y, P8z; real_cpu Cx, Cy, Cz; if(initialDiscretization == 100.0) { P1x = 6950; P1y = 50; P1z = 50; P2x = 6950; P2y = 19950; P2z = 50; P3x = 6950; P3y = 50; P3z = 2950; P4x = 6950; P4y = 19950; P4z = 2950; P5x = 50; P5y = 50; P5z = 50; P6x = 50; P6y = 19950; P6z = 50; P7x = 50; P7y = 50; P7z = 2950; P8x = 50; P8y = 19950; P8z = 2950; Cx = 3450; Cy = 9950; Cz = 1450; } else if(initialDiscretization == 200.0) { P1x = 6900; P1y = 100; P1z = 100; P2x = 6900; P2y = 19900; P2z = 100; P3x = 6900; P3y = 100; P3z = 2900; P4x = 6900; P4y = 19900; P4z = 2900; P5x = 100; P5y = 100; P5z = 100; P6x = 100; P6y = 19900; P6z = 100; P7x = 100; P7y = 100; P7z = 2900; P8x = 100; P8y = 19900; P8z = 2900; Cx = 3500; 
Cy = 9900; Cz = 1500; } else if(initialDiscretization == 125.0) { P1x = 6937.5; P1y = 62.5; P1z = 62.5; P2x = 6937.5; P2y = 19937.5; P2z = 62.5; P3x = 6937.5; P3y = 62.5; P3z = 2937.5; P4x = 6937.5; P4y = 19937.5; P4z = 2937.5; P5x = 62.5; P5y = 62.5; P5z = 62.5; P6x = 62.5; P6y = 19937.5; P6z = 62.5; P7x = 3937.5; P7y = 19937.5; P7z = 62.5; P8x = 62.5; P8y = 19937.5; P8z = 2937.5; Cx = 3437.5; Cy = 9937.5; Cz = 1562.5; } else if(initialDiscretization == 250.0) { P1x = 6875; P1y = 125; P1z = 125; P2x = 6875; P2y = 19875; P2z = 125; P3x = 6875; P3y = 125; P3z = 2875; P4x = 6875; P4y = 19875; P4z = 2875; P5x = 125; P5y = 125; P5z = 125; P6x = 125; P6y = 19875; P6z = 125; P7x = 125; P7y = 125; P7z = 2875; P8x = 125; P8y = 19875; P8z = 2875; Cx = 3375; Cy = 9875; Cz = 1125; } else { P1x = -1; P1y = -1; P1z = -1; P2x = -1; P2y = -1; P2z = -1; P3x = -1; P3y = -1; P3z = -1; P4x = -1; P4y = -1; P4z = -1; P5x = -1; P5y = -1; P5z = -1; P6x = -1; P6y = -1; P6z = -1; P7x = -1; P7y = -1; P7z = -1; P8x = -1; P8y = -1; P8z = -1; Cx = -1; Cy = -1; Cz = -1; } bool cannotChange = ((c->center.x == P1x) && (c->center.y == P1y) && (c->center.z == P1z)); cannotChange |= ((c->center.x == P2x) && (c->center.y == P2y) && (c->center.z == P2z)); cannotChange |= ((c->center.x == P3x) && (c->center.y == P3y) && (c->center.z == P3z)); cannotChange |= ((c->center.x == P4x) && (c->center.y == P4y) && (c->center.z == P4z)); cannotChange |= ((c->center.x == P5x) && (c->center.y == P5y) && (c->center.z == P5z)); cannotChange |= ((c->center.x == P6x) && (c->center.y == P6y) && (c->center.z == P6z)); cannotChange |= ((c->center.x == P7x) && (c->center.y == P7y) && (c->center.z == P7z)); cannotChange |= ((c->center.x == P8x) && (c->center.y == P8y) && (c->center.z == P8z)); cannotChange |= ((c->center.x == Cx) && (c->center.y == Cy) && (c->center.z == Cz)); c->can_change = !cannotChange; } void set_plain_fibrosis(struct grid *the_grid, real_cpu phi, unsigned fib_seed) { print_to_stdout_and_file("Making 
%.2lf %% of cells inactive\n", phi * 100.0); struct cell_node *grid_cell; if(fib_seed == 0) fib_seed = (unsigned)time(NULL) + getpid(); srand(fib_seed); print_to_stdout_and_file("Using %u as seed\n", fib_seed); grid_cell = the_grid->first_cell; while(grid_cell != 0) { if(grid_cell->active) { real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi) { grid_cell->active = false; } INITIALIZE_FIBROTIC_INFO(grid_cell); FIBROTIC(grid_cell) = true; } grid_cell = grid_cell->next; } } void set_plain_source_sink_fibrosis (struct grid *the_grid, real_cpu channel_width, real_cpu channel_length) { print_to_stdout_and_file("Making upper and down left corner inactive !\n"); bool inside; real_cpu side_length_x = the_grid->mesh_side_length.x; real_cpu side_length_y = the_grid->mesh_side_length.y; real_cpu side_length_z = the_grid->mesh_side_length.z; real_cpu region_height = (side_length_y - channel_width) / 2.0; struct cell_node *grid_cell; grid_cell = the_grid->first_cell; while(grid_cell != 0) { if(grid_cell->active) { real_cpu x = grid_cell->center.x; real_cpu y = grid_cell->center.y; real_cpu z = grid_cell->center.z; // Check region 1 inside = (x >= 0.0) && (x <= channel_length) &&\ (y >= 0.0) && (y <= region_height); // Check region 2 inside |= (x >= 0.0) && (x <= channel_length) &&\ (y >= region_height + channel_width) && (y <= side_length_y); if(inside) { grid_cell->active = false; } INITIALIZE_FIBROTIC_INFO(grid_cell); FIBROTIC(grid_cell) = true; } grid_cell = grid_cell->next; } } void set_plain_sphere_fibrosis(struct grid *the_grid, real_cpu phi, real_cpu plain_center, real_cpu sphere_radius, real_cpu bz_size, real_cpu bz_radius, unsigned fib_seed) { print_to_stdout_and_file("Making %.2lf %% of cells inactive\n", phi * 100.0f); if(fib_seed == 0) fib_seed = (unsigned)time(NULL) + getpid(); srand(fib_seed); print_to_stdout_and_file("Using %u as seed\n", fib_seed); real_cpu bz_radius_2 = pow(bz_radius, 2.0); real_cpu sphere_radius_2 = pow(sphere_radius, 2.0); struct 
cell_node *grid_cell; grid_cell = the_grid->first_cell; while(grid_cell != 0) { real_cpu distance = pow(grid_cell->center.x - plain_center, 2.0) + pow(grid_cell->center.y - plain_center, 2.0); if(grid_cell->active) { INITIALIZE_FIBROTIC_INFO(grid_cell); if(distance <= bz_radius_2) { if(distance <= sphere_radius_2) { FIBROTIC(grid_cell) = true; } else { BORDER_ZONE(grid_cell) = true; } } } grid_cell = grid_cell->next; } grid_cell = the_grid->first_cell; while(grid_cell != 0) { if(grid_cell->active) { if(FIBROTIC(grid_cell)) { real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi) grid_cell->active = false; grid_cell->can_change = false; } else if(BORDER_ZONE(grid_cell)) { real_cpu distance_from_center = sqrt((grid_cell->center.x - plain_center) * (grid_cell->center.x - plain_center) + (grid_cell->center.y - plain_center) * (grid_cell->center.y - plain_center)); distance_from_center = (distance_from_center - sphere_radius) / bz_size; real_cpu phi_local = phi - phi * distance_from_center; real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi_local) grid_cell->active = false; grid_cell->can_change = false; } } grid_cell = grid_cell->next; } } void set_plain_sphere_fibrosis_with_fibrotic_hole (struct grid *the_grid, real_cpu phi, real_cpu plain_center, real_cpu sphere_radius, real_cpu bz_size, real_cpu bz_radius, real_cpu fib_hole_radius,unsigned fib_seed) { print_to_stdout_and_file("Making %.2lf %% of cells inactive\n", phi * 100.0f); if(fib_seed == 0) fib_seed = (unsigned)time(NULL) + getpid(); srand(fib_seed); print_to_stdout_and_file("Using %u as seed\n", fib_seed); real_cpu bz_radius_2 = pow(bz_radius, 2.0); real_cpu sphere_radius_2 = pow(sphere_radius, 2.0); real_cpu fib_radius_2 = pow(fib_hole_radius, 2.0); struct cell_node *grid_cell; grid_cell = the_grid->first_cell; while(grid_cell != 0) { real_cpu distance = pow(grid_cell->center.x - plain_center, 2.0) + pow(grid_cell->center.y - plain_center, 2.0); if(grid_cell->active) { 
INITIALIZE_FIBROTIC_INFO(grid_cell); if(distance <= bz_radius_2) { if(distance <= sphere_radius_2) { FIBROTIC(grid_cell) = true; } if (distance <= fib_radius_2){ grid_cell-> active = false; grid_cell->can_change = false; } else { BORDER_ZONE(grid_cell) = true; } } } grid_cell = grid_cell->next; } grid_cell = the_grid->first_cell; while(grid_cell != 0) { if(grid_cell->active) { if(FIBROTIC(grid_cell)) { real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi) grid_cell->active = false; grid_cell->can_change = false; } else if(BORDER_ZONE(grid_cell)) { real_cpu distance_from_center = sqrt((grid_cell->center.x - plain_center) * (grid_cell->center.x - plain_center) + (grid_cell->center.y - plain_center) * (grid_cell->center.y - plain_center)); distance_from_center = (distance_from_center - sphere_radius) / bz_size; real_cpu phi_local = phi - phi * distance_from_center; real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi_local) grid_cell->active = false; grid_cell->can_change = false; } } grid_cell = grid_cell->next; } } void set_human_mesh_fibrosis(struct grid *grid, real_cpu phi, unsigned seed, real_cpu big_scar_center_x, real_cpu big_scar_center_y, real_cpu big_scar_center_z, real_cpu small_scar_center_x, real_cpu small_scar_center_y, real_cpu small_scar_center_z) { if(seed == 0) seed = (unsigned)time(NULL) + getpid(); srand(seed); print_to_stdout_and_file("Using %u as seed\n", seed); real_cpu bz_size_big = 0; real_cpu bz_size_small = 0; real_cpu dist_big = 0; real_cpu dist_small = 0; print_to_stdout_and_file("Calculating fibrosis using phi: %lf\n", phi); struct cell_node *grid_cell = grid->first_cell; while(grid_cell != NULL) { if(grid_cell->active) { if(FIBROTIC(grid_cell)) { grid_cell->can_change = false; real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi) grid_cell->active = false; } else if(BORDER_ZONE(grid_cell)) { real_cpu centerX = grid_cell->center.x; real_cpu centerY = grid_cell->center.y; real_cpu centerZ = grid_cell->center.z; 
if(SCAR_TYPE(grid_cell) == 'b') { dist_big = sqrt((centerX - big_scar_center_x) * (centerX - big_scar_center_x) + (centerY - big_scar_center_y) * (centerY - big_scar_center_y) + (centerZ - big_scar_center_z) * (centerZ - big_scar_center_z)); if(dist_big > bz_size_big) { bz_size_big = dist_big; } } else if(SCAR_TYPE(grid_cell) == 's') { dist_small = sqrt((centerX - small_scar_center_x) * (centerX - small_scar_center_x) + (centerY - small_scar_center_y) * (centerY - small_scar_center_y) + (centerZ - small_scar_center_z) * (centerZ - small_scar_center_z)); if(dist_small > bz_size_small) { bz_size_small = dist_small; } } } } grid_cell = grid_cell->next; } grid_cell = grid->first_cell; while(grid_cell != NULL) { if(grid_cell->active) { if(BORDER_ZONE(grid_cell)) { real_cpu centerX = grid_cell->center.x; real_cpu centerY = grid_cell->center.y; real_cpu centerZ = grid_cell->center.z; if(SCAR_TYPE(grid_cell) == 'b') { dist_big = sqrt((centerX - big_scar_center_x) * (centerX - big_scar_center_x) + (centerY - big_scar_center_y) * (centerY - big_scar_center_y) + (centerZ - big_scar_center_z) * (centerZ - big_scar_center_z)); dist_big = dist_big / bz_size_big; real_cpu phi_local = phi - phi * dist_big; real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi_local) { grid_cell->active = false; } grid_cell->can_change = false; } else if(SCAR_TYPE(grid_cell) == 's') { dist_small = sqrt((centerX - small_scar_center_x) * (centerX - small_scar_center_x) + (centerY - small_scar_center_y) * (centerY - small_scar_center_y) + (centerZ - small_scar_center_z) * (centerZ - small_scar_center_z)); dist_small = dist_small / bz_size_small; real_cpu phi_local = phi - phi * dist_small; real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi_local) { grid_cell->active = false; } grid_cell->can_change = false; } } } grid_cell = grid_cell->next; } } void set_human_mesh_fibrosis_from_file(struct grid *grid, char type, const char *filename, int size) { FILE *file = fopen(filename, "r"); if(!file) { 
printf("Error opening file %s!!\n", filename); exit(0); } real_cpu **scar_mesh = (real_cpu **)malloc(sizeof(real_cpu *) * size); for(int i = 0; i < size; i++) { scar_mesh[i] = (real_cpu *)malloc(sizeof(real_cpu) * 3); if(scar_mesh[i] == NULL) { printf("Failed to allocate memory\n"); exit(0); } } real_cpu dummy1, dummy2; // unused values int i = 0; while(!feof(file)) { fscanf(file, "%lf,%lf,%lf,%lf,%lf\n", &scar_mesh[i][0], &scar_mesh[i][1], &scar_mesh[i][2], &dummy1, &dummy2); i++; } fclose(file); sort_vector(scar_mesh, size); struct cell_node *grid_cell = grid->first_cell; while(grid_cell != 0) { real_cpu center_x = grid_cell->center.x; real_cpu center_y = grid_cell->center.y; real_cpu center_z = grid_cell->center.z; if((grid_cell->discretization.x == 100.0) && (SCAR_TYPE(grid_cell) == type)) { int index = inside_mesh(scar_mesh, center_x, center_y, center_z, 0, size - 1); grid_cell->active = (index != -1); } grid_cell = grid_cell->next; } for(int k = 0; k < size; k++) { free(scar_mesh[k]); } free(scar_mesh); } void set_fibrosis_from_file(struct grid *grid, const char *filename, int size) { FILE *file = fopen(filename, "r"); if(!file) { printf("Error opening file %s!!\n", filename); exit(0); } real_cpu **scar_mesh = (real_cpu **)malloc(sizeof(real_cpu *) * size); for(int i = 0; i < size; i++) { scar_mesh[i] = (real_cpu *)malloc(sizeof(real_cpu) * 7); if(scar_mesh[i] == NULL) { printf("Failed to allocate memory\n"); exit(0); } } for(int i = 0; i < size; i++) { fscanf(file, "%lf,%lf,%lf,%lf,%lf,%lf,%lf\n", &scar_mesh[i][0], &scar_mesh[i][1], &scar_mesh[i][2], &scar_mesh[i][3], &scar_mesh[i][4], &scar_mesh[i][5], &scar_mesh[i][6]); } fclose(file); #pragma omp parallel for for(int j = 0; j < size; j++) { struct cell_node *grid_cell = grid->first_cell; real_cpu b_center_x = scar_mesh[j][0]; real_cpu b_center_y = scar_mesh[j][1]; real_cpu b_h_dx = scar_mesh[j][3]; real_cpu b_h_dy = scar_mesh[j][4]; bool active = (bool) (scar_mesh[j][6]); int c = 0; while (grid_cell != 0) 
{ if(grid_cell->active) { real_cpu center_x = grid_cell->center.x; real_cpu center_y = grid_cell->center.y; real_cpu half_dy = grid_cell->discretization.y/2.0; if(FIBROTIC_INFO(grid_cell) == NULL) { INITIALIZE_FIBROTIC_INFO(grid_cell); FIBROTIC(grid_cell) = 1; } struct point_3d p; p.x = b_center_x + b_h_dx; p.y = b_center_y - b_h_dy; if (center_x == b_center_x && center_y + half_dy <= p.x && center_y - half_dy >= p.y) { grid_cell->active = active; c++; } } if(c == 4) break; grid_cell = grid_cell->next; } } for(int k = 0; k < size; k++) { free(scar_mesh[k]); } free(scar_mesh); } void set_plain_fibrosis_inside_region (struct grid *the_grid, real_cpu phi, unsigned fib_seed,\ const double min_x, const double max_x,\ const double min_y, const double max_y,\ const double min_z, const double max_z) { print_to_stdout_and_file("Making %.2lf %% of cells inside the region inactive\n", phi * 100.0); struct cell_node *grid_cell; if(fib_seed == 0) fib_seed = (unsigned)time(NULL) + getpid(); srand(fib_seed); print_to_stdout_and_file("Using %u as seed\n", fib_seed); grid_cell = the_grid->first_cell; while(grid_cell != 0) { real center_x = grid_cell->center.x; real center_y = grid_cell->center.y; real center_z = grid_cell->center.z; if (center_x >= min_x && center_x <= max_x &&\ center_y >= min_y && center_y <= max_y &&\ center_z >= min_z && center_z <= max_z) { if(grid_cell->active) { real_cpu p = (real_cpu)(rand()) / (RAND_MAX); if(p < phi) { grid_cell->active = false; } INITIALIZE_FIBROTIC_INFO(grid_cell); FIBROTIC(grid_cell) = true; } } grid_cell = grid_cell->next; } }
_Atomic-3.c
/* PR c/65467 */
/* { dg-do compile } */
/* { dg-additional-options "-std=c11" } */

/* GCC testsuite fixture: verifies that _Atomic-qualified objects are
   rejected in the OpenMP clauses and constructs below. The dg-error
   directives are matched per-line by DejaGnu, so each one must stay on the
   same line as the construct it checks; code tokens are unchanged. */

/* reduction, aligned and array-section clauses on _Atomic variables. */
void f1 (void)
{
  _Atomic int i = 0, k[4];
  int j = 0;
  k[0] = 0;
  k[1] = 0;
  k[2] = 0;
  k[3] = 0;
  #pragma omp parallel reduction (+:i) /* { dg-error "'_Atomic' 'i' in 'reduction' clause" } */
  i++;
  #pragma omp declare reduction (foo: _Atomic int: omp_out += omp_in) initializer (omp_priv = omp_orig * 0) /* { dg-error "'_Atomic' qualified type in '#pragma omp declare reduction'" } */
  #pragma omp declare reduction (bar: int: omp_out += omp_in) initializer (omp_priv = omp_orig * 0)
  #pragma omp parallel reduction (bar:j)
  j++;
  #pragma omp parallel reduction (bar:i) /* { dg-error "'_Atomic' 'i' in 'reduction' clause" } */
  i++;
  #pragma omp parallel reduction (+:k) /* { dg-error "'_Atomic' 'k' in 'reduction' clause" } */
  k[1]++;
  #pragma omp parallel reduction (+:k[1:2]) /* { dg-error "'_Atomic' \[^\n\r]* in 'reduction' clause" } */
  k[1]++;
}

/* aligned clause on an _Atomic pointer. */
void f2 (int *_Atomic p)
{
  #pragma omp simd aligned (p : 16) /* { dg-error "'_Atomic' 'p' in 'aligned' clause" } */
  for (int i = 0; i < 16; i++)
    p[i]++;
}

_Atomic int x;

/* _Atomic operands in all four forms of #pragma omp atomic. */
void f3 (_Atomic int *p)
{
  int i;
  #pragma omp atomic write
  x = 6; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic read
  i = x; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic update
  x += 6; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic capture
  i = x *= 2; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic write
  p[2] = 6; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic read
  i = p[2]; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic update
  p[2] += 6; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
  #pragma omp atomic capture
  i = p[2] *= 2; /* { dg-error "'_Atomic' expression in '#pragma omp atomic'" } */
}

/* linear clause on an _Atomic parameter of a declare-simd function. */
#pragma omp declare simd linear(x:1) /* { dg-error "'_Atomic' 'x' in 'linear' clause" } */
int
f4 (_Atomic int x, int y)
{
  return x + y;
}
gemv_x_coo.c
#include "alphasparse/kernel.h" #include "alphasparse/kernel_plain.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif static alphasparse_status_t gemv_coo_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { const ALPHA_INT m = A->rows; const ALPHA_INT nnz = A->nnz; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num); #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < nnz; i++) { const ALPHA_INT threadId = alpha_get_thread_id(); const ALPHA_INT r = A->row_indx[i]; const ALPHA_INT c = A->col_indx[i]; ALPHA_Number v; alpha_mul(v, A->values[i], x[c]); alpha_madde(tmp[threadId][r], alpha, v); } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); for (ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } return ALPHA_SPARSE_STATUS_SUCCESS; } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_Number beta, ALPHA_Number *y) { const ALPHA_INT thread_num = alpha_get_thread_num(); return gemv_coo_omp(alpha, A, x, beta, y); }
GB_binop__pair_uint32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__pair_uint32)
// A.*B function (eWiseMult):      GB ((none))
// A.*B function (eWiseMult):      GB ((none))
// A.*B function (eWiseMult):      GB ((none))
// A.*B function (eWiseMult):      GB ((none))
// A*D function (colscale):        GB ((none))
// D*A function (rowscale):        GB ((none))
// C+=B function (dense accum):    GB (_Cdense_accumB__pair_uint32)
// C+=b function (dense accum):    GB (_Cdense_accumb__pair_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__pair_uint32)
// C=scalar+B                      GB ((none))
// C=scalar+B'                     GB ((none))
// C=A+scalar                      GB ((none))
// C=A'+scalar                     GB ((none))

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = 1

// NOTE: PAIR ignores both of its inputs and always produces 1, so the
// GB_GETA/GB_GETB macros below deliberately expand to empty statements.

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: PAIR, the result is the constant 1 for any x and y
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_UINT32 || GxB_NO_PAIR_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pair_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above always returns); kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // PAIR: inputs ignored, so no loads from Bx (empty statements below)
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        // PAIR: inputs ignored, so no loads from Ax (empty statements below)
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ; ;                    \
    Cx [pC] = 1 ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ; ;                    \
    Cx [pC] = 1 ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
HelloOMP.c
/* gcc -fopenmp -O3 -Wall HelloOMP.c -o HelloOMP */ #include <stdio.h> #include <omp.h> int main(void) { #pragma omp parallel printf("[%d]: hello world!\n", omp_get_thread_num()); return 0; }
GB_emult_template.c
//------------------------------------------------------------------------------
// GB_emult_template: phase1 and phase2 for C=A.*B, C<M>=A.*B
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Computes C=A.*B (no mask) or C<M>=A.*B (mask present and not complemented).
// Does not handle the case C<!M>=A.*B.  The complemented mask is handled in
// GB_mask instead.  If present, the mask M is assumed to be very sparse
// compared with A and B.

// phase1: does not compute C itself, but just counts the # of entries in each
// vector of C.  Fine tasks compute the # of entries in their slice of a
// single vector of C, and the results are cumsum'd.

// phase2: computes C, using the counts computed by phase1.

// NOTE: this file is a template: it is #include'd into a function body whose
// scope supplies A, B, C, M, TaskList, ntasks, nthreads, Mask_struct, the
// C_to_* maps, and the GB_* macros (GB_GETA, GB_GETB, GB_BINOP, ...).

{

    // iB_first is unused if the operator is FIRST or PAIR
    #include "GB_unused.h"

    //--------------------------------------------------------------------------
    // get A, B, M, and C
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ah = A->h ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const int64_t vlen = A->vlen ;

    const int64_t *GB_RESTRICT Bp = B->p ;
    const int64_t *GB_RESTRICT Bh = B->h ;
    const int64_t *GB_RESTRICT Bi = B->i ;

    const int64_t *GB_RESTRICT Mp = NULL ;
    const int64_t *GB_RESTRICT Mh = NULL ;
    const int64_t *GB_RESTRICT Mi = NULL ;
    const GB_void *GB_RESTRICT Mx = NULL ;
    size_t msize = 0 ;
    if (M != NULL)
    {
        Mp = M->p ;
        Mh = M->h ;
        Mi = M->i ;
        // Mx stays NULL for a structural mask: only the pattern of M is used
        Mx = (Mask_struct ? NULL : (M->x)) ;
        msize = M->type->size ;
    }

    #if defined ( GB_PHASE_2_OF_2 )
    const GB_ATYPE *GB_RESTRICT Ax = A->x ;
    const GB_BTYPE *GB_RESTRICT Bx = B->x ;
    const int64_t  *GB_RESTRICT Cp = C->p ;
    const int64_t  *GB_RESTRICT Ch = C->h ;
    int64_t  *GB_RESTRICT Ci = C->i ;
    GB_CTYPE *GB_RESTRICT Cx = C->x ;
    #endif

    //--------------------------------------------------------------------------
    // phase1: count entries in each C(:,j); phase2: compute C
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast  = TaskList [taskid].klast ;
        bool fine_task = (klast == -1) ;
        int64_t len ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
            len = TaskList [taskid].len ;
        }
        else
        {
            // a coarse task operates on one or more whole vectors
            len = vlen ;
        }

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of C
            //------------------------------------------------------------------

            int64_t j = (Ch == NULL) ? k : Ch [k] ;

            #if defined ( GB_PHASE_1_OF_2 )
            int64_t cjnz = 0 ;
            #else
            int64_t pC, pC_end ;
            if (fine_task)
            {
                // A fine task computes a slice of C(:,j)
                pC     = TaskList [taskid  ].pC ;
                pC_end = TaskList [taskid+1].pC ;
                ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ;
            }
            else
            {
                // The vectors of C are never sliced for a coarse task.
                pC     = Cp [k] ;
                pC_end = Cp [k+1] ;
            }
            int64_t cjnz = pC_end - pC ;
            if (cjnz == 0) continue ;
            #endif

            //------------------------------------------------------------------
            // get A(:,j)
            //------------------------------------------------------------------

            int64_t pA = -1, pA_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Ai,Ax [pA...pA_end-1], which is
                // a subset of the vector A(:,j)
                pA     = TaskList [taskid].pA ;
                pA_end = TaskList [taskid].pA_end ;
            }
            else
            {
                // A coarse task operates on the entire vector A (:,j)
                int64_t kA = (Ch == Ah) ? k :
                             ((C_to_A == NULL) ? j : C_to_A [k]) ;
                if (kA >= 0)
                {
                    pA     = Ap [kA] ;
                    pA_end = Ap [kA+1] ;
                }
            }

            int64_t ajnz = pA_end - pA ;    // nnz in A(:,j) for this slice
            bool adense = (ajnz == len) ;
            int64_t pA_start = pA ;

            // get the first and last indices in A(:,j) for this vector
            int64_t iA_first = -1 ;
            if (ajnz > 0)
            {
                iA_first = Ai [pA] ;
            }
            #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
            int64_t iA_last = -1 ;
            if (ajnz > 0)
            {
                iA_last = Ai [pA_end-1] ;
            }
            #endif

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            int64_t pB = -1, pB_end = -1 ;
            if (fine_task)
            {
                // A fine task operates on Bi,Bx [pB...pB_end-1], which is
                // a subset of the vector B(:,j)
                pB     = TaskList [taskid].pB ;
                pB_end = TaskList [taskid].pB_end ;
            }
            else
            {
                // A coarse task operates on the entire vector B (:,j)
                int64_t kB = (Ch == Bh) ? k :
                             ((C_to_B == NULL) ? j : C_to_B [k]) ;
                if (kB >= 0)
                {
                    pB     = Bp [kB] ;
                    pB_end = Bp [kB+1] ;
                }
            }

            int64_t bjnz = pB_end - pB ;    // nnz in B(:,j) for this slice
            bool bdense = (bjnz == len) ;
            int64_t pB_start = pB ;

            // get the first and last indices in B(:,j) for this vector
            int64_t iB_first = -1 ;
            if (bjnz > 0)
            {
                iB_first = Bi [pB] ;
            }
            #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG )
            int64_t iB_last = -1 ;
            if (bjnz > 0)
            {
                iB_last = Bi [pB_end-1] ;
            }
            #endif

            //------------------------------------------------------------------
            // phase1: count nnz (C (:,j)); phase2: compute C(:,j)
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (ajnz == 0 || bjnz == 0)
            {

                //--------------------------------------------------------------
                // A(:,j) and/or B(:,j) are empty
                //--------------------------------------------------------------

                ;

            }
            else if (iA_last < iB_first || iB_last < iA_first)
            {

                //--------------------------------------------------------------
                // intersection of A(:,j) and B(:,j) is empty
                //--------------------------------------------------------------

                // the last entry of A(:,j) comes before the first entry
                // of B(:,j), or visa versa
                ;

            }
            else
            #endif
            if (M == NULL)
            {
                if (adense && bdense)
                {

                    //----------------------------------------------------------
                    // A(:,j) and B(:,j) dense: thus C(:,j) dense
                    //----------------------------------------------------------

                    ASSERT (ajnz == bjnz) ;
                    ASSERT (iA_first == iB_first) ;
                    ASSERT (iA_last  == iB_last ) ;
                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz ;
                    #else
                    ASSERT (cjnz == ajnz) ;
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        Ci [pC + p] = p + iA_first ;
                        GB_GETA (aij, Ax, pA + p) ;
                        GB_GETB (bij, Bx, pB + p) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij) ;
                    }
                    #endif

                }
                else if (adense)
                {

                    //----------------------------------------------------------
                    // A(:,j) is dense, B(:,j) is sparse: thus C(:,j) sparse
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = bjnz ;
                    #else
                    ASSERT (cjnz == bjnz) ;
                    for (int64_t p = 0 ; p < bjnz ; p++)
                    {
                        int64_t i = Bi [pB + p] ;
                        Ci [pC + p] = i ;
                        // direct lookup into the dense A(:,j)
                        GB_GETA (aij, Ax, pA + i - iA_first) ;
                        GB_GETB (bij, Bx, pB + p) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij) ;
                    }
                    #endif

                }
                else if (bdense)
                {

                    //----------------------------------------------------------
                    // A(:,j) is sparse, B(:,j) is dense: thus C(:,j) sparse
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz = ajnz ;
                    #else
                    ASSERT (cjnz == ajnz) ;
                    for (int64_t p = 0 ; p < ajnz ; p++)
                    {
                        int64_t i = Ai [pA + p] ;
                        Ci [pC + p] = i ;
                        GB_GETA (aij, Ax, pA + p) ;
                        // direct lookup into the dense B(:,j)
                        GB_GETB (bij, Bx, pB + i - iB_first) ;
                        GB_BINOP (GB_CX (pC + p), aij, bij) ;
                    }
                    #endif

                }
                else if (ajnz > 32 * bjnz)
                {

                    //----------------------------------------------------------
                    // A(:,j) is much denser than B(:,j)
                    //----------------------------------------------------------

                    // iterate over the sparser B(:,j), binary-search in A(:,j)
                    for ( ; pB < pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        // find i in A(:,j)
                        int64_t pright = pA_end - 1 ;
                        bool found ;
                        GB_BINARY_SEARCH (i, Ai, pA, pright, found) ;
                        if (found)
                        {
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif

                }
                else if (bjnz > 32 * ajnz)
                {

                    //----------------------------------------------------------
                    // B(:,j) is much denser than A(:,j)
                    //----------------------------------------------------------

                    // iterate over the sparser A(:,j), binary-search in B(:,j)
                    for ( ; pA < pA_end ; pA++)
                    {
                        int64_t i = Ai [pA] ;
                        // find i in B(:,j)
                        int64_t pright = pB_end - 1 ;
                        bool found ;
                        GB_BINARY_SEARCH (i, Bi, pB, pright, found) ;
                        if (found)
                        {
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = i ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij) ;
                            pC++ ;
                            #endif
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif

                }
                else
                {

                    //----------------------------------------------------------
                    // A(:,j) and B(:,j) have about the same # of entries
                    //----------------------------------------------------------

                    // linear-time scan of A(:,j) and B(:,j)

                    while (pA < pA_end && pB < pB_end)
                    {
                        int64_t iA = Ai [pA] ;
                        int64_t iB = Bi [pB] ;
                        if (iA < iB)
                        {
                            // A(i,j) exists but not B(i,j)
                            pA++ ;
                        }
                        else if (iB < iA)
                        {
                            // B(i,j) exists but not A(i,j)
                            pB++ ;
                        }
                        else
                        {
                            // both A(i,j) and B(i,j) exist
                            #if defined ( GB_PHASE_1_OF_2 )
                            cjnz++ ;
                            #else
                            ASSERT (pC < pC_end) ;
                            Ci [pC] = iB ;
                            GB_GETA (aij, Ax, pA) ;
                            GB_GETB (bij, Bx, pB) ;
                            GB_BINOP (GB_CX (pC), aij, bij) ;
                            pC++ ;
                            #endif
                            pA++ ;
                            pB++ ;
                        }
                    }
                    #if defined ( GB_PHASE_2_OF_2 )
                    ASSERT (pC == pC_end) ;
                    #endif
                }

            }
            else
            {

                //--------------------------------------------------------------
                // Mask is present
                //--------------------------------------------------------------

                int64_t pM = -1 ;
                int64_t pM_end = -1 ;
                if (fine_task)
                {
                    // A fine task operates on Mi,Mx [pM...pM_end-1], which is
                    // a subset of the vector M(:,j)
                    pM     = TaskList [taskid].pM ;
                    pM_end = TaskList [taskid].pM_end ;
                }
                else
                {
                    int64_t kM = -1 ;
                    if (Ch == Mh)
                    {
                        // Ch is the same as Mh (a shallow copy), or both NULL
                        kM = k ;
                    }
                    else
                    {
                        kM = (C_to_M == NULL) ? j : C_to_M [k] ;
                    }
                    if (kM >= 0)
                    {
                        pM     = Mp [kM] ;
                        pM_end = Mp [kM+1] ;
                    }
                }

                //--------------------------------------------------------------
                // C(:,j)<M(:,j) = A(:,j) .* B (:,j)
                //--------------------------------------------------------------

                // iterate over the (assumed very sparse) mask pattern
                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // get M(i,j) for A(i,j) .* B (i,j)
                    //----------------------------------------------------------

                    int64_t i = Mi [pM] ;
                    bool mij = GB_mcast (Mx, pM, msize) ;
                    if (!mij) continue ;

                    //----------------------------------------------------------
                    // get A(i,j)
                    //----------------------------------------------------------

                    if (adense)
                    {
                        // A(:,j) is dense; use direct lookup for A(i,j)
                        pA = pA_start + i - iA_first ;
                    }
                    else
                    {
                        // A(:,j) is sparse; use binary search for A(i,j)
                        int64_t apright = pA_end - 1 ;
                        bool afound ;
                        GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ;
                        if (!afound) continue ;
                    }
                    ASSERT (Ai [pA] == i) ;

                    //----------------------------------------------------------
                    // get B(i,j)
                    //----------------------------------------------------------

                    if (bdense)
                    {
                        // B(:,j) is dense; use direct lookup for B(i,j)
                        pB = pB_start + i - iB_first ;
                    }
                    else
                    {
                        // B(:,j) is sparse; use binary search for B(i,j)
                        int64_t bpright = pB_end - 1 ;
                        bool bfound ;
                        GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ;
                        if (!bfound) continue ;
                    }
                    ASSERT (Bi [pB] == i) ;

                    //----------------------------------------------------------
                    // C(i,j) = A(i,j) .* B(i,j)
                    //----------------------------------------------------------

                    #if defined ( GB_PHASE_1_OF_2 )
                    cjnz++ ;
                    #else
                    Ci [pC] = i ;
                    GB_GETA (aij, Ax, pA) ;
                    GB_GETB (bij, Bx, pB) ;
                    GB_BINOP (GB_CX (pC), aij, bij) ;
                    pC++ ;
                    #endif
                }

                #if defined ( GB_PHASE_2_OF_2 )
                ASSERT (pC == pC_end) ;
                #endif
            }

            //------------------------------------------------------------------
            // final count of nnz (C (:,j))
            //------------------------------------------------------------------

            #if defined ( GB_PHASE_1_OF_2 )
            if (fine_task)
            {
                TaskList [taskid].pC = cjnz ;
            }
            else
            {
                Cp [k] = cjnz ;
            }
            #endif
        }
    }
}
atomic.c
#include<stdio.h> #include<omp.h> int big_ugly(int B){ return 2*B; } double doit(){ return 2.24; } int main(){ double X = 0; #pragma omp parallel { int id = omp_get_thread_num(); printf("%d\n", id); double tmp, B; B = doit(); tmp = big_ugly(B); // atomic #pragma omp atomic X += tmp; } printf("X = %f\n", X); return 0; }
GB_unop__identity_fp32_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_fp32_fp64)
// op(A') function: GB (_unop_tran__identity_fp32_fp64)

// C type: float
// A type: double
// cast: float cij = (float) aij
// unaryop: cij = aij

// NOTE: the IDENTITY op combined with the fp64 -> fp32 typecast narrows each
// value from double to float, so precision may be lost in the cast below.

#define GB_ATYPE \
    double

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ;         \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp32_fp64)
(
    float *Cx,                  // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp32_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_sort_template.c
//------------------------------------------------------------------------------
// GB_sort_template: sort all vectors in a matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// macros:

// GB_SORT (func)   defined as GB_sort_func_TYPE_ascend or _descend,
//                  GB_msort_ISO_ascend or _descend,
//                  or GB_msort_func_UDT
// GB_TYPE          bool, int8_, ... or GB_void for UDT or ISO
// GB_ADDR(A,p)     A+p for builtin, A + p * GB_SIZE otherwise
// GB_SIZE          size of each entry: sizeof (GB_TYPE) for built-in
// GB_GET(x,X,i)    x = X [i] for built-in, memcpy for UDT
// GB_COPY(A,i,C,k) A[i] = C [k]
// GB_SWAP(A,i,k)   swap A[i] and A[k]
// GB_LT            compare two entries, x < y, or x > y for descending sort

//------------------------------------------------------------------------------
// GB_SORT (partition): use a pivot to partition an array
//------------------------------------------------------------------------------

// C.A.R Hoare partition method, partitions an array in-place via a pivot.
// k = partition (A, n) partitions A [0:n-1] such that all entries in
// A [0:k] are <= all entries in A [k+1:n-1].

static inline int64_t GB_SORT (partition)
(
    GB_TYPE *restrict A_0,      // size n arrays to partition
    int64_t *restrict A_1,      // size n array
    const int64_t n,            // size of the array(s) to partition
    uint64_t *seed              // random number seed, modified on output
    #if GB_SORT_UDT
    , size_t csize              // size of GB_TYPE
    , size_t xsize              // size of op->xtype
    , GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
    #endif
)
{

    // select a pivot at random
    int64_t pivot = ((n < GB_RAND_MAX) ?
        GB_rand15 (seed) : GB_rand (seed)) % n;

    // Pivot = A [pivot]
    GB_GET (Pivot0, A_0, pivot) ;       // Pivot0 = A_0 [pivot]
    int64_t Pivot1 = A_1 [pivot] ;

    // At the top of the while loop, A [left+1...right-1] is considered, and
    // entries outside this range are in their proper place and not touched.
    // Since the input specification of this function is to partition A
    // [0..n-1], left must start at -1 and right must start at n.
    int64_t left = -1 ;
    int64_t right = n ;

    // keep partitioning until the left and right sides meet
    while (true)
    {
        // loop invariant: A [0..left] < pivot and A [right..n-1] > Pivot,
        // so the region to be considered is A [left+1 ... right-1].

        // increment left until finding an entry A [left] >= Pivot
        bool less ;
        do
        {
            left++ ;
            // a0 = A_0 [left]
            GB_GET (a0, A_0, left) ;
            // less = (a0, A_1 [left]) < (Pivot0, Pivot1)
            GB_LT (less, a0, A_1 [left], Pivot0, Pivot1) ;
        }
        while (less) ;

        // decrement right until finding an entry A [right] <= Pivot
        do
        {
            right-- ;
            // a1 = A_0 [right]
            GB_GET (a1, A_0, right) ;
            // less = (Pivot0, Pivot1) < (a1, A_1 [right])
            GB_LT (less, Pivot0, Pivot1, a1, A_1 [right]) ;
        }
        while (less) ;

        // now A [0..left-1] < pivot and A [right+1..n-1] > pivot, but
        // A [left] > pivot and A [right] < pivot, so these two entries
        // are out of place and must be swapped.

        // However, if the two sides have met, the partition is finished.
        if (left >= right)
        {
            // A has been partitioned into A [0:right] and A [right+1:n-1].
            // k = right+1, so A is split into A [0:k-1] and A [k:n-1].
            return (right + 1) ;
        }

        // since A [left] > pivot and A [right] < pivot, swap them
        GB_SWAP (A_0, left, right) ;
        int64_t t1 = A_1 [left] ; A_1 [left] = A_1 [right] ; A_1 [right] = t1 ;

        // after the swap this condition holds:
        // A [0..left] < pivot and A [right..n-1] > pivot
    }
}

//------------------------------------------------------------------------------
// GB_SORT (quicksort): recursive single-threaded quicksort
//------------------------------------------------------------------------------

static void GB_SORT (quicksort)     // sort A [0:n-1]
(
    GB_TYPE *restrict A_0,      // size n arrays to sort
    int64_t *restrict A_1,      // size n array
    const int64_t n,            // size of the array(s) to sort
    uint64_t *seed              // random number seed
    #if GB_SORT_UDT
    , size_t csize              // size of GB_TYPE
    , size_t xsize              // size of op->xtype
    , GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
    #endif
)
{

    if (n < 20)
    {
        // in-place insertion sort on A [0:n-1], where n is small
        for (int64_t k = 1 ; k < n ; k++)
        {
            for (int64_t j = k ; j > 0 ; j--)
            {
                // a0 = A_0 [j]
                GB_GET (a0, A_0, j) ;
                // a1 = A_0 [j-1]
                GB_GET (a1, A_0, j-1) ;
                // break if A [j] >= A [j-1]
                bool less ;
                // less = (a0, A_1 [j]) < (a1, A_1 [j-1])
                GB_LT (less, a0, A_1 [j], a1, A_1 [j-1]) ;
                if (!less) break ;
                // swap A [j-1] and A [j]
                GB_SWAP (A_0, j-1, j) ;
                int64_t t1 = A_1 [j-1] ; A_1 [j-1] = A_1 [j] ; A_1 [j] = t1 ;
            }
        }
    }
    else
    {
        // partition A [0:n-1] into A [0:k-1] and A [k:n-1]
        int64_t k = GB_SORT (partition) (A_0, A_1, n, seed
            #if GB_SORT_UDT
            , csize, xsize, flt, fcast
            #endif
            ) ;

        // sort each partition

        // sort A [0:k-1]
        GB_SORT (quicksort) (A_0, A_1, k, seed
            #if GB_SORT_UDT
            , csize, xsize, flt, fcast
            #endif
            ) ;

        // sort A [k:n-1]
        GB_SORT (quicksort) (GB_ADDR (A_0, k), A_1 + k, n-k, seed
            #if GB_SORT_UDT
            , csize, xsize, flt, fcast
            #endif
            ) ;
    }
}

//------------------------------------------------------------------------------
// GB_SORT (binary_search): binary search for the pivot
//------------------------------------------------------------------------------

// The Pivot value is Y [pivot], and a binary search for the Pivot is made in
// the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on
// input.  The return value is pleft, where
//
//    X [p_start ... pleft-1] <= Pivot and
//    X [pleft ... p_end-1] >= Pivot holds.
//
// pleft is returned in the range p_start to p_end.  If pleft is p_start, then
// the Pivot is smaller than all entries in X [p_start...p_end-1], and the left
// list X [p_start...pleft-1] is empty.  If pleft is p_end, then the Pivot is
// larger than all entries in X [p_start...p_end-1], and the right list X
// [pleft...p_end-1] is empty.

static int64_t GB_SORT (binary_search) // return pleft
(
    const GB_TYPE *restrict Y_0,    // Pivot is Y [pivot]
    const int64_t *restrict Y_1,
    const int64_t pivot,
    const GB_TYPE *restrict X_0,    // search in X [p_start..p_end_-1]
    const int64_t *restrict X_1,
    const int64_t p_start,
    const int64_t p_end
    #if GB_SORT_UDT
    , size_t csize              // size of GB_TYPE
    , size_t xsize              // size of op->xtype
    , GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
    #endif
)
{

    //--------------------------------------------------------------------------
    // find where the Pivot appears in X
    //--------------------------------------------------------------------------

    // binary search of X [p_start...p_end-1] for the Pivot
    int64_t pleft = p_start ;
    int64_t pright = p_end - 1 ;
    GB_GET (Pivot0, Y_0, pivot) ;       // Pivot0 = Y_0 [pivot]
    int64_t Pivot1 = Y_1 [pivot] ;
    bool less ;
    while (pleft < pright)
    {
        int64_t pmiddle = (pleft + pright) >> 1 ;
        // x0 = X_0 [pmiddle]
        GB_GET (x0, X_0, pmiddle) ;
        // less = (x0, X_1 [pmiddle]) < (Pivot0, Pivot1)
        GB_LT (less, x0, X_1 [pmiddle], Pivot0, Pivot1) ;
        pleft  = less ? (pmiddle+1) : pleft ;
        pright = less ? pright : pmiddle ;
    }

    // binary search is narrowed down to a single item
    // or it has found the list is empty:
    ASSERT (pleft == pright || pleft == pright + 1) ;

    // If found is true then X [pleft == pright] == Pivot.  If duplicates
    // appear then X [pleft] is any one of the entries equal to the Pivot
    // in the list.  If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft+1 ... p_end-1] > Pivot holds.
    //    The value X [pleft] may be either < or > Pivot.
    bool found = (pleft == pright) && (X_1 [pleft] == Pivot1) ;

    // Modify pleft and pright:
    if (!found && (pleft == pright))
    {
        // x0 = X_0 [pleft]
        GB_GET (x0, X_0, pleft) ;
        // less = (x0, X_1 [pleft]) < (Pivot0, Pivot1)
        GB_LT (less, x0, X_1 [pleft], Pivot0, Pivot1) ;
        if (less)
        {
            pleft++ ;
        }
        else
        {
//          pright++ ;              // (not needed)
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] > Pivot holds,
    //    and pleft-1 == pright

    // If X has no duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    // If X has duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] <= Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    return (pleft) ;
}

//------------------------------------------------------------------------------
// GB_SORT (create_merge_tasks)
//------------------------------------------------------------------------------

// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ].  The task tids created are t0 to
// t0+ntasks-1.

static void GB_SORT (create_merge_tasks)
(
    // output:
    int64_t *restrict L_task,        // L_task [t0...t0+ntasks-1] computed
    int64_t *restrict L_len,         // L_len  [t0...t0+ntasks-1] computed
    int64_t *restrict R_task,        // R_task [t0...t0+ntasks-1] computed
    int64_t *restrict R_len,         // R_len  [t0...t0+ntasks-1] computed
    int64_t *restrict S_task,        // S_task [t0...t0+ntasks-1] computed
    // input:
    const int t0,                    // first task tid to create
    const int ntasks,                // # of tasks to create
    const int64_t pS_start,          // merge into S [pS_start...]
    const GB_TYPE *restrict L_0,     // Left = L [pL_start...pL_end-1]
    const int64_t *restrict L_1,
    const int64_t pL_start,
    const int64_t pL_end,
    const GB_TYPE *restrict R_0,     // Right = R [pR_start...pR_end-1]
    const int64_t *restrict R_1,
    const int64_t pR_start,
    const int64_t pR_end
    #if GB_SORT_UDT
    , size_t csize              // size of GB_TYPE
    , size_t xsize              // size of op->xtype
    , GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
    #endif
)
{

    //--------------------------------------------------------------------------
    // get problem size
    //--------------------------------------------------------------------------

    int64_t nleft  = pL_end - pL_start ;        // size of Left array
    int64_t nright = pR_end - pR_start ;        // size of Right array
    int64_t total_work = nleft + nright ;       // total work to do
    ASSERT (ntasks >= 1) ;
    ASSERT (total_work > 0) ;

    //--------------------------------------------------------------------------
    // create the tasks
    //--------------------------------------------------------------------------

    if (ntasks == 1)
    {

        //----------------------------------------------------------------------
        // a single task will merge all of Left and Right into Sresult
        //----------------------------------------------------------------------

        L_task [t0] = pL_start ; L_len [t0] = nleft ;
        R_task [t0] = pR_start ; R_len [t0] = nright ;
        S_task [t0] = pS_start ;

    }
    else
    {

        //----------------------------------------------------------------------
        // partition the Left and Right arrays for multiple merge tasks
        //----------------------------------------------------------------------

        int64_t pleft, pright ;
        if (nleft >= nright)
        {
            // split Left in half, and search for its pivot in Right
            pleft = (pL_end + pL_start) >> 1 ;
            pright = GB_SORT (binary_search) (
                        L_0, L_1, pleft,
                        R_0, R_1, pR_start, pR_end
                        #if GB_SORT_UDT
                        , csize, xsize, flt, fcast
                        #endif
                        ) ;
        }
        else
        {
            // split Right in half, and search for its pivot in Left
            pright = (pR_end + pR_start) >> 1 ;
            pleft = GB_SORT (binary_search) (
                        R_0, R_1, pright,
                        L_0, L_1, pL_start, pL_end
                        #if GB_SORT_UDT
                        , csize, xsize, flt, fcast
                        #endif
                        ) ;
        }

        //----------------------------------------------------------------------
        // partition the tasks according to the work of each partition
        //----------------------------------------------------------------------

        // work0 is the total work in the first partition
        int64_t work0 = (pleft - pL_start) + (pright - pR_start) ;
        int ntasks0 = (int) round ((double) ntasks *
            (((double) work0) / ((double) total_work))) ;

        // ensure at least one task is assigned to each partition
        ntasks0 = GB_IMAX (ntasks0, 1) ;
        ntasks0 = GB_IMIN (ntasks0, ntasks-1) ;
        int ntasks1 = ntasks - ntasks0 ;

        //----------------------------------------------------------------------
        // assign ntasks0 to the first half
        //----------------------------------------------------------------------

        // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1]
        // into the result S [pS_start...work0-1].

        GB_SORT (create_merge_tasks) (
            L_task, L_len, R_task, R_len, S_task,
            t0, ntasks0, pS_start,
            L_0, L_1, pL_start, pleft,
            R_0, R_1, pR_start, pright
            #if GB_SORT_UDT
            , csize, xsize, flt, fcast
            #endif
            ) ;

        //----------------------------------------------------------------------
        // assign ntasks1 to the second half
        //----------------------------------------------------------------------

        // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1]
        // into the result S [pS_start+work0...pS_start+total_work].

        int t1 = t0 + ntasks0 ;     // first task id of the second set of tasks
        int64_t pS_start1 = pS_start + work0 ;  // 2nd set starts here in S
        GB_SORT (create_merge_tasks) (
            L_task, L_len, R_task, R_len, S_task,
            t1, ntasks1, pS_start1,
            L_0, L_1, pleft, pL_end,
            R_0, R_1, pright, pR_end
            #if GB_SORT_UDT
            , csize, xsize, flt, fcast
            #endif
            ) ;
    }
}

//------------------------------------------------------------------------------
// GB_SORT (merge): merge two sorted lists via a single thread
//------------------------------------------------------------------------------

// merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1]

static void GB_SORT (merge)
(
    GB_TYPE *restrict S_0,              // output of length nleft + nright
    int64_t *restrict S_1,
    const GB_TYPE *restrict Left_0,     // left input of length nleft
    const int64_t *restrict Left_1,
    const int64_t nleft,
    const GB_TYPE *restrict Right_0,    // right input of length nright
    const int64_t *restrict Right_1,
    const int64_t nright
    #if GB_SORT_UDT
    , size_t csize              // size of GB_TYPE
    , size_t xsize              // size of op->xtype
    , GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
    #endif
)
{
    int64_t p, pleft, pright ;

    // merge the two inputs, Left and Right, while both inputs exist
    for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++)
    {
        // left0 = Left_0 [pleft]
        GB_GET (left0, Left_0, pleft) ;
        // right0 = Right_0 [pright]
        GB_GET (right0, Right_0, pright) ;
        bool less ;
        // less = (left0, Left_1 [pleft]) < (right0, Right_1 [pright])
        GB_LT (less, left0, Left_1 [pleft], right0, Right_1 [pright]) ;
        if (less)
        {
            // S [p] = Left [pleft++]
            GB_COPY (S_0, p, Left_0, pleft) ;
            S_1 [p] = Left_1 [pleft] ;
            pleft++ ;
        }
        else
        {
            // S [p] = Right [pright++]
            GB_COPY (S_0, p, Right_0, pright) ;
            S_1 [p] = Right_1 [pright] ;
            pright++ ;
        }
    }

    // either input is exhausted; copy the remaining list into S
    if (pleft < nleft)
    {
        int64_t nremaining = (nleft - pleft) ;
        memcpy (GB_ADDR (S_0, p), GB_ADDR (Left_0, pleft),
            nremaining * GB_SIZE) ;
        memcpy (S_1 + p, Left_1 + pleft, nremaining * sizeof (int64_t)) ;
    }
    else if (pright < nright)
    {
        int64_t nremaining = (nright - pright) ;
        memcpy (GB_ADDR (S_0, p), GB_ADDR (Right_0, pright),
            nremaining * GB_SIZE) ;
        memcpy (S_1 + p, Right_1 + pright, nremaining * sizeof (int64_t)) ;
    }
}

//------------------------------------------------------------------------------
// GB_SORT (vector) parallel mergesort of a single vector
//------------------------------------------------------------------------------

static void GB_SORT (vector)    // sort the pair of arrays A_0, A_1
(
    GB_TYPE *restrict A_0,      // size n array
    int64_t *restrict A_1,      // size n array
    GB_TYPE *restrict W_0,      // workspace of size n * GB_SIZE bytes
    int64_t *restrict W,        // int64_t workspace of size n+6*ntasks+1
    const int64_t n,
    const int kk,
    const int ntasks,
    const int nthreads          // # of threads to use
    #if GB_SORT_UDT
    , size_t csize              // size of GB_TYPE
    , size_t xsize              // size of op->xtype
    , GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
    #endif
)
{

    //--------------------------------------------------------------------------
    // split up workspace
    //--------------------------------------------------------------------------

    ASSERT (nthreads > 2 && n >= GB_BASECASE) ;
    int64_t *T = W ;
    int64_t *restrict W_1    = T ; T += n ;
    int64_t *restrict L_task = T ; T += ntasks ;
    int64_t *restrict L_len  = T ; T += ntasks ;
    int64_t *restrict R_task = T ; T += ntasks ;
    int64_t *restrict R_len  = T ; T += ntasks ;
    int64_t *restrict S_task = T ; T += ntasks ;
    int64_t *restrict Slice  = T ; T += (ntasks+1) ;

    //--------------------------------------------------------------------------
    // partition and sort the leaves
    //--------------------------------------------------------------------------

    GB_eslice (Slice, n, ntasks) ;
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t leaf = Slice [tid] ;
        int64_t leafsize = Slice [tid+1] - leaf ;
        uint64_t seed = tid ;
        GB_SORT (quicksort) (GB_ADDR (A_0, leaf), A_1 + leaf, leafsize, &seed
            #if GB_SORT_UDT
            , csize, xsize, flt, fcast
            #endif
            ) ;
    }

    //--------------------------------------------------------------------------
    // merge each level
    //--------------------------------------------------------------------------

    int nt = 1 ;
    for (int k = kk ; k >= 2 ; k -= 2)
    {

        //----------------------------------------------------------------------
        // merge level k into level k-1, from A into W
        //----------------------------------------------------------------------

        // TODO: skip k and k-1 for each group of 4 sublists of A if they are
        // already sorted with respect to each other.

        // this could be done in parallel if ntasks was large
        for (tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two A sublists into one W sublist
            GB_SORT (create_merge_tasks) (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                A_0, A_1, Slice [tid],    Slice [tid+nt],
                A_0, A_1, Slice [tid+nt], Slice [tid+2*nt]
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;

            GB_SORT (merge) (
                GB_ADDR (W_0, pS), W_1 + pS,
                GB_ADDR (A_0, pL), A_1 + pL, nL,
                GB_ADDR (A_0, pR), A_1 + pR, nR
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }
        nt = 2*nt ;

        //----------------------------------------------------------------------
        // merge level k-1 into level k-2, from W into A
        //----------------------------------------------------------------------

        // this could be done in parallel if ntasks was large
        for (tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two W sublists into one A sublist
            GB_SORT (create_merge_tasks) (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                W_0, W_1, Slice [tid],    Slice [tid+nt],
                W_0, W_1, Slice [tid+nt], Slice [tid+2*nt]
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;

            GB_SORT (merge) (
                GB_ADDR (A_0, pS), A_1 + pS,
                GB_ADDR (W_0, pL), W_1 + pL, nL,
                GB_ADDR (W_0, pR), W_1 + pR, nR
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }
        nt = 2*nt ;
    }
}

//------------------------------------------------------------------------------
// sort all vectors in a matrix
//------------------------------------------------------------------------------

#undef  GB_FREE_WORKSPACE
#define GB_FREE_WORKSPACE                           \
{                                                   \
    GB_WERK_POP (Werk, int64_t) ;                   \
    GB_FREE_WORK (&C_skipped, C_skipped_size) ;     \
    GB_FREE_WORK (&W_0, W_0_size) ;                 \
    GB_FREE_WORK (&W, W_size) ;                     \
}

static GrB_Info GB_SORT (matrix)
(
    GrB_Matrix C,               // matrix sorted in-place
    #if GB_SORT_UDT
    GrB_BinaryOp op,            // comparator for user-defined types only
    #endif
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (C, "C to sort", GB0) ;
    ASSERT (GB_JUMBLED_OK (C)) ;
    ASSERT (GB_IS_SPARSE (C) || GB_IS_HYPERSPARSE (C)) ;
    #if GB_SORT_UDT
    ASSERT_BINARYOP_OK (op, "op", GB0) ;
    ASSERT (op->ztype == GrB_BOOL) ;
    ASSERT (op->xtype == op->ytype) ;
    #endif

    int64_t cnz = GB_nnz (C) ;
    if (C->iso || cnz <= 1)
    {
        // nothing to do
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // get input
    //--------------------------------------------------------------------------

    int64_t cnvec = C->nvec ;
    int64_t *restrict Cp = C->p ;
    int64_t *restrict Ci = C->i ;
    GB_TYPE *restrict Cx = (GB_TYPE *) C->x ;

    // workspace
    GB_TYPE *restrict W_0 = NULL ; size_t W_0_size = 0 ;
    int64_t *restrict W = NULL ; size_t W_size = 0 ;
    int64_t *restrict C_skipped = NULL ; size_t C_skipped_size = 0 ;
    GB_WERK_DECLARE (Werk, int64_t) ;

    #if GB_SORT_UDT
    // get typesize, and function pointers for operators and typecasting
    GrB_Type ctype = C->type ;
    size_t csize = ctype->size ;
    size_t xsize = op->xtype->size ;
    GxB_binary_function flt = op->binop_function ;
    GB_cast_function fcast = GB_cast_factory (op->xtype->code, ctype->code) ;
    #endif

    //==========================================================================
    // phase1: sort all short vectors
    //==========================================================================

    // slice the C matrix into tasks for phase 1
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (cnz, chunk, nthreads_max) ;
    int ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
    ntasks = GB_IMIN (ntasks, cnvec) ;
    ntasks = GB_IMAX (ntasks, 1) ;
//  printf ("phase1: threads %d tasks %d\n", nthreads, ntasks) ;

    GB_WERK_PUSH (Werk, 3*ntasks + 2, int64_t) ;
    if (Werk == NULL)
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }
    int64_t *restrict C_max   = Werk ;                  // size ntasks
    int64_t *restrict C_skip  = Werk + ntasks ;         // size ntasks+1
    int64_t *restrict C_slice = Werk + 2*ntasks + 1;    // size ntasks+1
    GB_pslice (C_slice, Cp, cnvec, ntasks, false) ;

    // sort all short vectors in parallel, one thread per vector
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        const int64_t kfirst = C_slice [tid] ;
        const int64_t klast  = C_slice [tid+1] ;
        int64_t task_max_length = 0 ;
        int64_t n_skipped = 0 ;
        for (int64_t k = kfirst ; k < klast ; k++)
        {
            // sort the vector C(:,k), unless it is too long
            const int64_t pC_start = Cp [k] ;
            const int64_t pC_end   = Cp [k+1] ;
            const int64_t cknz = pC_end - pC_start ;
            if (cknz <= GB_BASECASE || nthreads == 1)
            {
//              printf ("\n------------sort: %ld cknz %ld\n", k, cknz) ;
                uint64_t seed = k ;
                GB_SORT (quicksort) (GB_ADDR (Cx, pC_start), Ci + pC_start,
                    cknz, &seed
                    #if GB_SORT_UDT
                    , csize, xsize, flt, fcast
                    #endif
                    ) ;
            }
            else
            {
//              printf ("\n------------skip: %ld cknz %ld\n", k, cknz) ;
                n_skipped++ ;
            }
            task_max_length = GB_IMAX (task_max_length, cknz) ;
        }
        C_max [tid] = task_max_length ;
        C_skip [tid] = n_skipped ;
    }

    // find max vector length and return if all vectors are now sorted
    int64_t max_length = 0 ;
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        max_length = GB_IMAX (max_length, C_max [tid]) ;
    }
    if (max_length <= GB_BASECASE || nthreads == 1)
    {
        // all vectors are sorted
        GB_FREE_WORKSPACE ;
        return (GrB_SUCCESS) ;
    }

    //==========================================================================
    // phase2: sort all long vectors in parallel
    //==========================================================================

    //--------------------------------------------------------------------------
    // construct a list of vectors that must still be sorted
    //--------------------------------------------------------------------------

    GB_cumsum (C_skip, ntasks, NULL, 1, Context) ;
    int64_t total_skipped = C_skip [ntasks] ;

    C_skipped = GB_MALLOC_WORK (total_skipped, int64_t, &C_skipped_size) ;
    if (C_skipped == NULL)
    {
        // out of memory
        GB_FREE_WORKSPACE ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        const int64_t kfirst = C_slice [tid] ;
        const int64_t klast  = C_slice [tid+1] ;
        int64_t n_skipped = C_skip [tid] ;
        for (int64_t k = kfirst ; k < klast ; k++)
        {
            const int64_t pC_start = Cp [k] ;
            const int64_t pC_end   = Cp [k+1] ;
            const int64_t cknz = pC_end - pC_start ;
            if (cknz > GB_BASECASE)
            {
                // C(:,k) was not sorted
                C_skipped [n_skipped++] = k ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // determine # of tasks for each vector in phase 2
    //--------------------------------------------------------------------------

    // determine the number of levels to create, which must always be an
    // even number.  The # of levels is chosen to ensure that the # of leaves
    // of the task tree is between 4*nthreads and 16*nthreads.

    //  2 to 4 threads:     4 levels, 16 quicksort leaves
    //  5 to 16 threads:    6 levels, 64 quicksort leaves
    // 17 to 64 threads:    8 levels, 256 quicksort leaves
    // 65 to 256 threads:   10 levels, 1024 quicksort leaves
    // 256 to 1024 threads: 12 levels, 4096 quicksort leaves
    // ...

    int kk = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ;
    int ntasks2 = 1 << kk ;
//  printf ("phase2: threads %d tasks %d skipped %ld\n", nthreads, ntasks2,
//      total_skipped) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    W = GB_MALLOC_WORK (max_length + 6*ntasks2 + 1, int64_t, &W_size) ;
    W_0 = (GB_TYPE *) GB_MALLOC_WORK (max_length * GB_SIZE, GB_void,
        &W_0_size) ;
    if (W == NULL || W_0 == NULL)
    {
        // out of memory
        GB_FREE_WORKSPACE ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // sort each long vector using all available threads
    //--------------------------------------------------------------------------

    for (int64_t t = 0 ; t < total_skipped ; t++)
    {
        const int64_t k = C_skipped [t] ;
        const int64_t pC_start = Cp [k] ;
        const int64_t pC_end   = Cp [k+1] ;
        const int64_t cknz = pC_end - pC_start ;
        ASSERT (cknz > GB_BASECASE) ;
        GB_SORT (vector) (GB_ADDR (Cx, pC_start), Ci + pC_start, W_0, W,
            cknz, kk, ntasks2, nthreads
            #if GB_SORT_UDT
            , csize, xsize, flt, fcast
            #endif
            ) ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    C->jumbled = true ;
    ASSERT_MATRIX_OK (C, "C sorted by value", GB0) ;
    return (GrB_SUCCESS) ;
}

#undef GB_SORT
#undef GB_TYPE
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_unaryop__abs_uint64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint64_bool
// op(A') function:  GB_tran__abs_uint64_bool

// C type:   uint64_t
// A type:   bool
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

// Note: ABS on bool is the identity, so the "unary op" here is a plain
// assignment; only the bool -> uint64_t cast does any work.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of A; each iteration is independent,
// so the loop is statically scheduled across nthreads OpenMP threads.

GrB_Info GB_unop__abs_uint64_bool
(
    uint64_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which is textually
// included and specialized by the macros defined above.

GrB_Info GB_tran__abs_uint64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
WaveFunctionComponent.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2020 QMCPACK developers.
//
// File developed by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
//                    Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
//                    Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
//                    Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//                    Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
//                    Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////

#ifndef QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H
#define QMCPLUSPLUS_WAVEFUNCTIONCOMPONENT_H

#include "Message/Communicate.h"
#include "Configuration.h"
#include "Particle/ParticleSet.h"
#include "Particle/VirtualParticleSet.h"
#include "Particle/DistanceTableData.h"
#include "OhmmsData/RecordProperty.h"
#include "QMCWaveFunctions/OrbitalSetTraits.h"
#include "Particle/MCWalkerConfiguration.h"
#include "type_traits/template_types.hpp"
#ifdef QMC_CUDA
#include "type_traits/CUDATypes.h"
#endif

/**@file WaveFunctionComponent.h
 *@brief Declaration of WaveFunctionComponent
 */
namespace qmcplusplus
{
#ifdef QMC_CUDA
// Descriptor of one non-local pseudopotential quadrature job for the
// vectorized (GPU) NLratios path: which walker, which electron, and how
// many quadrature points to evaluate.
struct NLjob
{
  int walker;
  int elec;
  int numQuadPoints;
  NLjob(int w, int e, int n) : walker(w), elec(e), numQuadPoints(n) {}
};
#endif

///forward declaration
struct WaveFunctionComponent;
struct DiffWaveFunctionComponent;
class ResourceCollection;

typedef WaveFunctionComponent* WaveFunctionComponentPtr;
typedef DiffWaveFunctionComponent* DiffWaveFunctionComponentPtr;

/**@defgroup WaveFunctionComponent group
 * @brief Classes which constitute a many-body trial wave function
 *
 * A many-body trial wave function is
 * \f[ \Psi(\{ {\bf R}\}) = \prod_i \psi_{i}(\{ {\bf R}\}),
 * \f]
 * where \f$\Psi\f$s are represented by
 * the derived classes from WaveFunctionComponent.
 */
/** @ingroup WaveFunctionComponent
 * @brief An abstract class for a component of a many-body trial wave function
 *
 * mw_ prefix is a function name signature indicating it is for handling a batch of WaveFunctionComponent objects
 * which are required to be base class pointers of the same derived class type.
 * all the mw_ routines must be implemented in a way either stateless or maintains states of every walker.
 */
struct WaveFunctionComponent : public QMCTraits
{
  /** enum for a update mode */
  enum
  {
    ORB_PBYP_RATIO,   /*!< particle-by-particle ratio only */
    ORB_PBYP_ALL,     /*!< particle-by-particle, update Value-Gradient-Laplacian */
    ORB_PBYP_PARTIAL, /*!< particle-by-particle, update Value and Gradient */
    ORB_WALKER,       /*!< walker update */
    ORB_ALLWALKER     /*!< all walkers update */
  };

  typedef ParticleAttrib<ValueType> ValueVectorType;
  typedef ParticleAttrib<GradType> GradVectorType;
  typedef ParticleSet::Walker_t Walker_t;
  typedef Walker_t::WFBuffer_t WFBufferType;
  typedef Walker_t::Buffer_t BufferType;
  typedef OrbitalSetTraits<RealType>::ValueMatrix_t RealMatrix_t;
  typedef OrbitalSetTraits<ValueType>::ValueMatrix_t ValueMatrix_t;
  typedef OrbitalSetTraits<ValueType>::GradMatrix_t GradMatrix_t;
  typedef OrbitalSetTraits<ValueType>::HessType HessType;
  typedef OrbitalSetTraits<ValueType>::HessVector_t HessVector_t;

  // the value type for log(psi)
  using LogValueType = std::complex<QTFull::RealType>;
  // the value type for psi(r')/psi(r)
  using PsiValueType = QTFull::ValueType;

  /** flag to set the optimization mode */
  bool IsOptimizing;
  /** boolean to set optimization
   *
   * If true, this object is actively modified during optimization
   */
  bool Optimizable;
  /** true, if this component is fermionic */
  bool is_fermionic;

  /** current update mode */
  int UpdateMode;
  /** current \f$\log\phi \f$ */
  LogValueType LogValue;
  /** Pointer to the differential WaveFunctionComponent of this object
   *
   * If dPsi=0, this WaveFunctionComponent is constant with respect to the optimizable variables
   */
  DiffWaveFunctionComponentPtr dPsi;
  /** A vector for \f$ \frac{\partial \nabla \log\phi}{\partial \alpha} \f$ */
  GradVectorType dLogPsi;
  /** A vector for \f$ \frac{\partial \nabla^2 \log\phi}{\partial \alpha} \f$ */
  ValueVectorType d2LogPsi;
  /** Name of the class derived from WaveFunctionComponent */
  const std::string ClassName;
  /** Name of the object
   * It is required to be different for objects of the same derived type like multiple J1.
   * It can be left empty for object which is unique per many-body WF.
   */
  const std::string myName;
  ///list of variables this WaveFunctionComponent handles
  opt_variables_type myVars;
  ///Bytes in WFBuffer
  size_t Bytes_in_WFBuffer;

  /// default constructor
  WaveFunctionComponent(const std::string& class_name, const std::string& obj_name = "");
  ///default destructor
  virtual ~WaveFunctionComponent() {}

  inline void setOptimizable(bool optimizeit) { Optimizable = optimizeit; }

  ///assign a differential WaveFunctionComponent
  virtual void setDiffOrbital(DiffWaveFunctionComponentPtr d);

  ///assembles the full value
  PsiValueType getValue() const { return LogToValue<PsiValueType>::convert(LogValue); }

  /** check in optimizable parameters
   * @param active a super set of optimizable variables
   *
   * Add the parameters this WaveFunctionComponent manage to active.
   */
  virtual void checkInVariables(opt_variables_type& active) = 0;

  /** check out optimizable variables
   *
   * Update myVars index map
   */
  virtual void checkOutVariables(const opt_variables_type& active) = 0;

  /** reset the parameters during optimizations
   */
  virtual void resetParameters(const opt_variables_type& active) = 0;

  /** print the state, e.g., optimizables */
  virtual void reportStatus(std::ostream& os) = 0;

  /** evaluate the value of the WaveFunctionComponent from scratch
   * @param P active ParticleSet
   * @param G Gradients, \f$\nabla\ln\Psi\f$
   * @param L Laplacians, \f$\nabla^2\ln\Psi\f$
   * @return the log value
   *
   * Mainly for walker-by-walker move. The initial stage of particle-by-particle
   * move also uses this.
   */
  virtual LogValueType evaluateLog(ParticleSet& P,
                                   ParticleSet::ParticleGradient_t& G,
                                   ParticleSet::ParticleLaplacian_t& L) = 0;

  /** evaluate from scratch the same type WaveFunctionComponent of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param G_list the list of Gradients pointers in a walker batch, \f$\nabla\ln\Psi\f$
   * @param L_list the list of Laplacians pointers in a walker batch, \f$\nabla^2\ln\Psi\f$
   * @param values the log WF values of walkers in a batch
   */
  virtual void mw_evaluateLog(const RefVector<WaveFunctionComponent>& WFC_list,
                              const RefVector<ParticleSet>& P_list,
                              const RefVector<ParticleSet::ParticleGradient_t>& G_list,
                              const RefVector<ParticleSet::ParticleLaplacian_t>& L_list);

  /** recompute the value of the WaveFunctionComponents which require critical accuracy.
   * needed for Slater Determinants but not needed for most types of WaveFunctionComponents
   */
  virtual void recompute(ParticleSet& P) {}

  //  virtual void evaluateHessian(ParticleSet& P, IndexType iat, HessType& grad_grad_psi)
  //  {
  //    APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented");
  //  }

  virtual void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi_all)
  {
    APP_ABORT("WaveFunctionComponent::evaluateHessian is not implemented in " + ClassName + " class.");
  }

  /** Prepare internal data for updating WFC correspond to a particle group
   * It should be called before moving particles of a given group.
   * This call can be used to handle the precomputation of data used for moving this group of particle.
   * Such data should be static with respect to the moves of particles within this group.
   * Particle groups usually correspond to determinants of different spins.
   * @param P quantum particle set
   * @param ig particle group index
   */
  virtual void prepareGroup(ParticleSet& P, int ig) {}

  virtual void mw_prepareGroup(const RefVector<WaveFunctionComponent>& WFC_list,
                               const RefVector<ParticleSet>& P_list,
                               int ig);

  /** return the current gradient for the iat-th particle
   * @param P quantum particle set
   * @param iat particle index
   * @return the gradient of the iat-th particle
   */
  virtual GradType evalGrad(ParticleSet& P, int iat)
  {
    APP_ABORT("WaveFunctionComponent::evalGradient is not implemented in " + ClassName + " class.");
    return GradType();
  }

  /** return the current spin gradient for the iat-th particle
   * Default implementation assumes that WaveFunctionComponent does not explicitly depend on Spin.
   * @param P quantum particle set
   * @param iat particle index
   * @return the spin gradient of the iat-th particle
   */
  virtual GradType evalGradWithSpin(ParticleSet& P, int iat, ComplexType& spingrad) { return evalGrad(P, iat); }

  /** compute the current gradients for the iat-th particle of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param iat particle index
   * @param grad_now the list of gradients in a walker batch, \f$\nabla\ln\Psi\f$
   */
  virtual void mw_evalGrad(const RefVector<WaveFunctionComponent>& WFC_list,
                           const RefVector<ParticleSet>& P_list,
                           int iat,
                           std::vector<GradType>& grad_now);

  /** return the logarithmic gradient for the iat-th particle
   * of the source particleset
   * @param P quantum particle set
   * @param iat particle index
   * @return the gradient of the iat-th particle
   */
  virtual GradType evalGradSource(ParticleSet& P, ParticleSet& source, int iat)
  {
    // unit_test_hamiltonian calls this function incorrectly; do not abort for now
    //    APP_ABORT("WaveFunctionComponent::evalGradSource is not implemented");
    return GradType();
  }

  /** Adds the gradient w.r.t. the iat-th particle of the
   *  source particleset (ions) of the logarithmic gradient
   *  and laplacian w.r.t. the target particleset (electrons).
   * @param P quantum particle set (electrons)
   * @param source classical particle set (ions)
   * @param iat particle index of source (ion)
   * @param the ion gradient of the electron gradient
   * @param the ion gradient of the electron laplacian.
   * @return the log gradient of psi w.r.t. the source particle iat
   */
  virtual GradType evalGradSource(ParticleSet& P,
                                  ParticleSet& source,
                                  int iat,
                                  TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
                                  TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
  {
    return GradType();
  }

  /** evaluate the ratio of the new to old WaveFunctionComponent value and the new gradient
   * @param P the active ParticleSet
   * @param iat the index of a particle
   * @param grad_iat Gradient for the active particle
   */
  virtual PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);

  virtual void ratioGradAsync(ParticleSet& P, int iat, PsiValueType& ratio, GradType& grad_iat);

  /** evaluate the ratio of the new to old WaveFunctionComponent value and the new spin gradient
   * Default implementation assumes that WaveFunctionComponent does not explicitly depend on Spin.
   * @param P the active ParticleSet
   * @param iat the index of a particle
   * @param grad_iat realspace gradient for the active particle
   * @param spingrad_iat spin gradient for the active particle
   */
  virtual PsiValueType ratioGradWithSpin(ParticleSet& P, int iat, GradType& grad_iat, ComplexType& spingrad_iat)
  {
    return ratioGrad(P, iat, grad_iat);
  }

  /** compute the ratio of the new to old WaveFunctionComponent value and the new gradient of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param iat particle index
   * @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
   * @param grad_now the list of new gradients in a walker batch, \f$\nabla\ln\Psi\f$
   */
  virtual void mw_ratioGrad(const RefVector<WaveFunctionComponent>& WFC_list,
                            const RefVector<ParticleSet>& P_list,
                            int iat,
                            std::vector<PsiValueType>& ratios,
                            std::vector<GradType>& grad_new);

  virtual void mw_ratioGradAsync(const RefVector<WaveFunctionComponent>& WFC_list,
                                 const RefVector<ParticleSet>& P_list,
                                 int iat,
                                 std::vector<PsiValueType>& ratios,
                                 std::vector<GradType>& grad_new);

  /** a move for iat-th particle is accepted. Update the current content.
   * @param P target ParticleSet
   * @param iat index of the particle whose new position was proposed
   * @param safe_to_delay if true, delayed accept is safe.
   */
  virtual void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false) = 0;

  /** moves of the iat-th particle on some walkers in a batch is accepted. Update the current content.
   *  Note that all the lists only include accepted walkers.
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param iat particle index
   * @param safe_to_delay if true, delayed accept is safe.
   */
  virtual void mw_accept_rejectMove(const RefVector<WaveFunctionComponent>& WFC_list,
                                    const RefVector<ParticleSet>& P_list,
                                    int iat,
                                    const std::vector<bool>& isAccepted,
                                    bool safe_to_delay = false);

  /** complete all the delayed updates, must be called after each substep or step during pbyp move
   */
  virtual void completeUpdates() {}

  /** complete all the delayed updates for all the walkers in a batch
   * must be called after each substep or step during pbyp move
   */
  virtual void mw_completeUpdates(const RefVector<WaveFunctionComponent>& WFC_list);

  /** If a move for iat-th particle is rejected, restore to the content.
   * @param iat index of the particle whose new position was proposed
   *
   * Ye: hopefully we can gradually move away from restore
   */
  virtual void restore(int iat) = 0;

  /** evaluate the ratio of the new to old WaveFunctionComponent value
   * @param P the active ParticleSet
   * @param iat the index of a particle
   * @return \f$ \psi( \{ {\bf R}^{'} \} )/ \psi( \{ {\bf R}\})\f$
   *
   * Specialized for particle-by-particle move
   */
  virtual PsiValueType ratio(ParticleSet& P, int iat) = 0;

  /** compute the ratio of the new to old WaveFunctionComponent value of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param iat particle index
   * @param ratios the list of WF ratios of a walker batch, \f$ \Psi( \{ {\bf R}^{'} \} )/ \Psi( \{ {\bf R}\})\f$
   */
  virtual void mw_calcRatio(const RefVector<WaveFunctionComponent>& WFC_list,
                            const RefVector<ParticleSet>& P_list,
                            int iat,
                            std::vector<PsiValueType>& ratios);

  /** compute gradients and laplacian of the TWF with respect to each particle.
   * @param P particle set
   * @param G Gradients, \f$\nabla\ln\Psi\f$
   * @param L Laplacians, \f$\nabla^2\ln\Psi\f$
   * @param fromscratch if true, all the internal data are recomputed from scratch
   * @return log(psi)
   */
  virtual LogValueType evaluateGL(ParticleSet& P,
                                  ParticleSet::ParticleGradient_t& G,
                                  ParticleSet::ParticleLaplacian_t& L,
                                  bool fromscratch);

  /** evaluate gradients and laplacian of the same type WaveFunctionComponent of multiple walkers
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param G_list the list of Gradients pointers in a walker batch, \f$\nabla\ln\Psi\f$
   * @param L_list the list of Laplacians pointers in a walker batch, \f$\nabla^2\ln\Psi\f$
   * @param fromscratch if true, all the internal data are recomputed from scratch
   */
  virtual void mw_evaluateGL(const RefVector<WaveFunctionComponent>& WFC_list,
                             const RefVector<ParticleSet>& P_list,
                             const RefVector<ParticleSet::ParticleGradient_t>& G_list,
                             const RefVector<ParticleSet::ParticleLaplacian_t>& L_list,
                             bool fromscratch);

  /** For particle-by-particle move. Requests space in the buffer
   *  based on the data type sizes of the objects in this class.
   * @param P particle set
   * @param buf Anonymous storage
   */
  virtual void registerData(ParticleSet& P, WFBufferType& buf) = 0;

  /** For particle-by-particle move. Requests space in the buffer
   *  based on the data type sizes of the objects in this class.
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param buf_list Anonymous storage
   */
  virtual void mw_registerData(const std::vector<WaveFunctionComponent*>& WFC_list,
                               const std::vector<ParticleSet*>& P_list,
                               const RefVector<WFBufferType>& buf_list)
  {
    // We can't make this static but we can use a lambda with no capture to
    // restrict access to *this scope
    auto registerComponentData = [](WaveFunctionComponent& wfc, ParticleSet& pset, WFBufferType& wfb) {
      wfc.registerData(pset, wfb);
    };
    for (int iw = 0; iw < WFC_list.size(); iw++)
      registerComponentData(*(WFC_list[iw]), *(P_list[iw]), buf_list[iw]);
  }

  /** For particle-by-particle move. Put the objects of this class
   *  in the walker buffer or forward the memory cursor.
   * @param P particle set
   * @param buf Anonymous storage
   * @param fromscratch request recomputing the precision critical
   *        pieces of wavefunction from scratch
   * @return log value of the wavefunction.
   */
  virtual LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) = 0;

  /** For particle-by-particle move. Put the objects of this class
   *  in the walker buffer or forward the memory cursor.
   * @param WFC_list the list of WaveFunctionComponent pointers of the same component in a walker batch
   * @param P_list the list of ParticleSet pointers in a walker batch
   * @param buf_list Anonymous storage
   * @param values the log WF values of walkers in a batch
   * @param fromscratch request recomputing the precision critical
   *        pieces of wavefunction from scratch
   */
  virtual void mw_updateBuffer(const RefVector<WaveFunctionComponent>& WFC_list,
                               const RefVector<ParticleSet>& P_list,
                               const RefVector<WFBufferType>& buf_list,
                               bool fromscratch = false)
  {
#pragma omp parallel for
    for (int iw = 0; iw < WFC_list.size(); iw++)
      WFC_list[iw].get().updateBuffer(P_list[iw], buf_list[iw], fromscratch);
  }

  /** For particle-by-particle move. Copy data or attach memory
   *  from a walker buffer to the objects of this class.
   *  The log value, P.G and P.L contribution from the objects
   *  of this class are also added.
   * @param P particle set
   * @param buf Anonymous storage
   */
  virtual void copyFromBuffer(ParticleSet& P, WFBufferType& buf) = 0;

  /** For particle-by-particle move. Copy data or attach memory
   *  from a walker buffer to the objects of this class.
   * @param P particle set
   * @param buf Anonymous storage
   */
  virtual void mw_copyFromBuffer(const RefVector<WaveFunctionComponent>& wfc_list,
                                 const RefVector<ParticleSet>& p_list,
                                 const RefVector<WFBufferType>& buf_list)
  {
#pragma omp parallel for
    for (int iw = 0; iw < wfc_list.size(); iw++)
      wfc_list[iw].get().copyFromBuffer(p_list[iw], buf_list[iw]);
  }

  /** initialize a shared resource and hand it to collection
   */
  virtual void createResource(ResourceCollection& collection) {}

  /** acquire a shared resource from collection
   */
  virtual void acquireResource(ResourceCollection& collection) {}

  /** return a shared resource to collection
   */
  virtual void releaseResource(ResourceCollection& collection) {}

  /** make clone
   * @param tqp target Quantum ParticleSet
   * @param deepcopy if true, make a deep copy
   *
   * If not true, return a proxy class
   */
  virtual WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;

  /** Return the Chiesa kinetic energy correction
   */
  virtual RealType KECorrection();

  /** Compute derivatives of the wavefunction with respect to the optimizable
   *  parameters.
   *  @param P particle set
   *  @param optvars optimizable parameters
   *  @param dlogpsi array of derivatives of the log of the wavefunction
   *  @param dhpsioverpsi array of derivatives of the Laplacian of the wavefunction divided by the wavefunction.
   *         Note that this does not use the Laplacian of the log of the wavefunction, as in evaluateLog.
   *         Also the factor of -1/2 from the kinetic energy must be included here. The 1/m
   *         factor is applied in TrialWaveFunction.
   */
  virtual void evaluateDerivatives(ParticleSet& P,
                                   const opt_variables_type& optvars,
                                   std::vector<ValueType>& dlogpsi,
                                   std::vector<ValueType>& dhpsioverpsi);

  /** Compute derivatives of the wavefunction with respect to the optimizable
   *  parameters
   *  @param P particle set
   *  @param optvars optimizable parameters
   *  @param dlogpsi array of derivatives of the log of the wavefunction
   *  Note: this function differs from the evaluateDerivatives function in the way that it only computes
   *        the derivative of the log of the wavefunction.
   */
  virtual void evaluateDerivativesWF(ParticleSet& P,
                                     const opt_variables_type& optvars,
                                     std::vector<ValueType>& dlogpsi);

  // Scale each of this component's derivative entries in dlogpsi by the real
  // part of the component's value, psi = exp(LogValue); only the indices this
  // component owns (via myVars) are touched.
  virtual void multiplyDerivsByOrbR(std::vector<ValueType>& dlogpsi)
  {
    RealType myrat = std::real(LogToValue<PsiValueType>::convert(LogValue));
    for (int j = 0; j < myVars.size(); j++)
    {
      int loc = myVars.where(j);
      dlogpsi[loc] *= myrat;
    }
  }

  /** Calculates the derivatives of \f$ \grad(\textrm{log}(\psi)) \f$ with respect to
      the optimizable parameters, and the dot product of this is then
      performed with the passed-in G_in gradient vector. This object is then
      returned as dgradlogpsi.
   */
  virtual void evaluateGradDerivatives(const ParticleSet::ParticleGradient_t& G_in, std::vector<ValueType>& dgradlogpsi)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::evaluateGradDerivatives in " + ClassName + " class.\n");
  }

  virtual void finalizeOptimization() {}

  /** evaluate the ratios of one virtual move with respect to all the particles
   * @param P reference particleset
   * @param ratios \f$ ratios[i]=\{{\bf R}\}\rightarrow {r_0,\cdots,r_i^p=pos,\cdots,r_{N-1}}\f$
   */
  virtual void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);

  /** evaluate ratios to evaluate the non-local PP
   * @param VP VirtualParticleSet
   * @param ratios ratios with new positions VP.R[k] the VP.refPtcl
   */
  virtual void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios);

  /** evaluate ratios to evaluate the non-local PP multiple walkers
   * @param wfc_list the list of WaveFunctionComponent references of the same component in a walker batch
   * @param vp_list the list of VirtualParticleSet references in a walker batch
   * @param ratios of all the virtual moves of all the walkers
   */
  virtual void mw_evaluateRatios(const RefVector<WaveFunctionComponent>& wfc_list,
                                 const RefVector<const VirtualParticleSet>& vp_list,
                                 std::vector<std::vector<ValueType>>& ratios)
  {
#pragma omp parallel for
    for (int iw = 0; iw < wfc_list.size(); iw++)
      wfc_list[iw].get().evaluateRatios(vp_list[iw], ratios[iw]);
  }

  /** evaluate ratios to evaluate the non-local PP
   * @param VP VirtualParticleSet
   * @param ratios ratios with new positions VP.R[k] the VP.refPtcl
   * @param dratios \f$\partial_{\alpha}(\ln \Psi ({\bf R}^{\prime}) - \ln \Psi ({\bf R})) \f$
   */
  virtual void evaluateDerivRatios(VirtualParticleSet& VP,
                                   const opt_variables_type& optvars,
                                   std::vector<ValueType>& ratios,
                                   Matrix<ValueType>& dratios);

  /////////////////////////////////////////////////////
  // Functions for vectorized evaluation and updates //
  /////////////////////////////////////////////////////
#ifdef QMC_CUDA
  using CTS = CUDAGlobalTypes;

  virtual void freeGPUmem() {}

  virtual void recompute(MCWalkerConfiguration& W, bool firstTime) {}

  virtual void reserve(PointerPool<gpu::device_vector<CTS::ValueType>>& pool, int kblocksize) {}

  /** Evaluate the log of the WF for all walkers
   *  @param walkers vector of all walkers
   *  @param logPsi output vector of log(psi)
   */
  virtual void addLog(MCWalkerConfiguration& W, std::vector<RealType>& logPsi)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::addLog for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  /** Evaluate the wave-function ratio w.r.t. moving particle iat
   *  for all walkers
   *  @param walkers vector of all walkers
   *  @param iat particle which is moving
   *  @param psi_ratios output vector with psi_new/psi_old
   */
  virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  // Returns the WF ratio and gradient w.r.t. iat for each walker
  // in the respective vectors
  virtual void ratio(MCWalkerConfiguration& W, int iat, std::vector<ValueType>& psi_ratios, std::vector<GradType>& grad)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void ratio(MCWalkerConfiguration& W,
                     int iat,
                     std::vector<ValueType>& psi_ratios,
                     std::vector<GradType>& grad,
                     std::vector<ValueType>& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void calcRatio(MCWalkerConfiguration& W,
                         int iat,
                         std::vector<ValueType>& psi_ratios,
                         std::vector<GradType>& grad,
                         std::vector<ValueType>& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::calcRatio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void addRatio(MCWalkerConfiguration& W,
                        int iat,
                        int k,
                        std::vector<ValueType>& psi_ratios,
                        std::vector<GradType>& grad,
                        std::vector<ValueType>& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::addRatio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void ratio(std::vector<Walker_t*>& walkers,
                     std::vector<int>& iatList,
                     std::vector<PosType>& rNew,
                     std::vector<ValueType>& psi_ratios,
                     std::vector<GradType>& grad,
                     std::vector<ValueType>& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::ratio for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void addGradient(MCWalkerConfiguration& W, int iat, std::vector<GradType>& grad)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::addGradient for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void calcGradient(MCWalkerConfiguration& W, int iat, int k, std::vector<GradType>& grad)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::calcGradient for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void gradLapl(MCWalkerConfiguration& W, GradMatrix_t& grads, ValueMatrix_t& lapl)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::gradLapl for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void det_lookahead(MCWalkerConfiguration& W,
                             std::vector<ValueType>& psi_ratios,
                             std::vector<GradType>& grad,
                             std::vector<ValueType>& lapl,
                             int iat,
                             int k,
                             int kd,
                             int nw)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::det_lookahead for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void update(MCWalkerConfiguration* W, std::vector<Walker_t*>& walkers, int iat, std::vector<bool>* acc, int k)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void update(const std::vector<Walker_t*>& walkers, const std::vector<int>& iatList)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::update for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void NLratios(MCWalkerConfiguration& W,
                        std::vector<NLjob>& jobList,
                        std::vector<PosType>& quadPoints,
                        std::vector<ValueType>& psi_ratios)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void NLratios(MCWalkerConfiguration& W,
                        gpu::device_vector<CUDA_PRECISION*>& Rlist,
                        gpu::device_vector<int*>& ElecList,
                        gpu::device_vector<int>& NumCoreElecs,
                        gpu::device_vector<CUDA_PRECISION*>& QuadPosList,
                        gpu::device_vector<CUDA_PRECISION*>& RatioList,
                        int numQuadPoints)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::NLRatios for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }

  virtual void evaluateDerivatives(MCWalkerConfiguration& W,
                                   const opt_variables_type& optvars,
                                   RealMatrix_t& dgrad_logpsi,
                                   RealMatrix_t& dhpsi_over_psi)
  {
    APP_ABORT("Need specialization of WaveFunctionComponent::evaluateDerivatives for " + ClassName +
              ".\n Required CUDA functionality not implemented. Contact developers.\n");
  }
#endif
};
} // namespace qmcplusplus
#endif
GB_binop__isge_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any change made here should instead be made in the Generator
// template, or it will be lost on the next regeneration.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isge_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__isge_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__isge_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__isge_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isge_uint16)
// A*D function (colscale):         GB (_AxD__isge_uint16)
// D*A function (rowscale):         GB (_DxB__isge_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__isge_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__isge_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isge_uint16)
// C=scalar+B                       GB (_bind1st__isge_uint16)
// C=scalar+B'                      GB (_bind1st_tran__isge_uint16)
// C=A+scalar                       GB (_bind2nd__isge_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__isge_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij >= bij)
// ISGE: "is greater than or equal"; the 0/1 result is stored in uint16_t.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGE is none of these, so this variant is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numeric work is done by the shared template, driven by the macros above
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isge_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        // NOTE(review): the return below makes the outer return unreachable;
        // this duplication comes from the generator and is harmless.
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isge_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isge_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isge_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isge_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x >= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isge_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij >= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ==== file: pt.c ==== */
/* Handle parameterized types (templates) for GNU -*- C++ -*-. Copyright (C) 1992-2020 Free Software Foundation, Inc. Written by Ken Raeburn (raeburn@cygnus.com) while at Watchmaker Computing. Rewritten by Jason Merrill (jason@cygnus.com). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Known bugs or deficiencies include: all methods must be provided in header files; can't use a source file that contains only the method templates and "just win". */ #include "config.h" #include "system.h" #include "coretypes.h" #include "cp-tree.h" #include "timevar.h" #include "stringpool.h" #include "varasm.h" #include "attribs.h" #include "stor-layout.h" #include "intl.h" #include "c-family/c-objc.h" #include "cp-objcp-common.h" #include "toplev.h" #include "tree-iterator.h" #include "type-utils.h" #include "gimplify.h" #include "gcc-rich-location.h" #include "selftest.h" #include "target.h" /* The type of functions taking a tree, and some additional data, and returning an int. */ typedef int (*tree_fn_t) (tree, void*); /* The PENDING_TEMPLATES is a list of templates whose instantiations have been deferred, either because their definitions were not yet available, or because we were putting off doing the work. 
*/ struct GTY ((chain_next ("%h.next"))) pending_template { struct pending_template *next; struct tinst_level *tinst; }; static GTY(()) struct pending_template *pending_templates; static GTY(()) struct pending_template *last_pending_template; int processing_template_parmlist; static int template_header_count; static GTY(()) tree saved_trees; static vec<int> inline_parm_levels; static GTY(()) struct tinst_level *current_tinst_level; static GTY(()) vec<tree, va_gc> *saved_access_scope; /* Live only within one (recursive) call to tsubst_expr. We use this to pass the statement expression node from the STMT_EXPR to the EXPR_STMT that is its result. */ static tree cur_stmt_expr; // -------------------------------------------------------------------------- // // Local Specialization Stack // // Implementation of the RAII helper for creating new local // specializations. local_specialization_stack::local_specialization_stack (lss_policy policy) : saved (local_specializations) { if (policy == lss_nop) ; else if (policy == lss_blank || !saved) local_specializations = new hash_map<tree, tree>; else local_specializations = new hash_map<tree, tree>(*saved); } local_specialization_stack::~local_specialization_stack () { if (local_specializations != saved) { delete local_specializations; local_specializations = saved; } } /* True if we've recursed into fn_type_unification too many times. */ static bool excessive_deduction_depth; struct GTY((for_user)) spec_entry { tree tmpl; tree args; tree spec; }; struct spec_hasher : ggc_ptr_hash<spec_entry> { static hashval_t hash (spec_entry *); static bool equal (spec_entry *, spec_entry *); }; /* The general template is not in these tables. */ typedef hash_table<spec_hasher> spec_hash_table; static GTY (()) spec_hash_table *decl_specializations; static GTY (()) spec_hash_table *type_specializations; /* Contains canonical template parameter types. The vector is indexed by the TEMPLATE_TYPE_IDX of the template parameter. 
Each element is a TREE_LIST, whose TREE_VALUEs contain the canonical template parameters of various types and levels. */ static GTY(()) vec<tree, va_gc> *canonical_template_parms; #define UNIFY_ALLOW_NONE 0 #define UNIFY_ALLOW_MORE_CV_QUAL 1 #define UNIFY_ALLOW_LESS_CV_QUAL 2 #define UNIFY_ALLOW_DERIVED 4 #define UNIFY_ALLOW_INTEGER 8 #define UNIFY_ALLOW_OUTER_LEVEL 16 #define UNIFY_ALLOW_OUTER_MORE_CV_QUAL 32 #define UNIFY_ALLOW_OUTER_LESS_CV_QUAL 64 enum template_base_result { tbr_incomplete_type, tbr_ambiguous_baseclass, tbr_success }; static bool resolve_overloaded_unification (tree, tree, tree, tree, unification_kind_t, int, bool); static int try_one_overload (tree, tree, tree, tree, tree, unification_kind_t, int, bool, bool); static int unify (tree, tree, tree, tree, int, bool); static void add_pending_template (tree); static tree reopen_tinst_level (struct tinst_level *); static tree tsubst_initializer_list (tree, tree); static tree get_partial_spec_bindings (tree, tree, tree); static tree coerce_template_parms (tree, tree, tree, tsubst_flags_t, bool, bool); static tree coerce_innermost_template_parms (tree, tree, tree, tsubst_flags_t, bool, bool); static void tsubst_enum (tree, tree, tree); static tree add_to_template_args (tree, tree); static bool check_instantiated_args (tree, tree, tsubst_flags_t); static int check_non_deducible_conversion (tree, tree, int, int, struct conversion **, bool); static int maybe_adjust_types_for_deduction (unification_kind_t, tree*, tree*, tree); static int type_unification_real (tree, tree, tree, const tree *, unsigned int, int, unification_kind_t, vec<deferred_access_check, va_gc> **, bool); static void note_template_header (int); static tree convert_nontype_argument_function (tree, tree, tsubst_flags_t); static tree convert_nontype_argument (tree, tree, tsubst_flags_t); static tree convert_template_argument (tree, tree, tree, tsubst_flags_t, int, tree); static tree for_each_template_parm (tree, tree_fn_t, void*, 
hash_set<tree> *, bool, tree_fn_t = NULL); static tree expand_template_argument_pack (tree); static tree build_template_parm_index (int, int, int, tree, tree); static bool inline_needs_template_parms (tree, bool); static void push_inline_template_parms_recursive (tree, int); static tree reduce_template_parm_level (tree, tree, int, tree, tsubst_flags_t); static int mark_template_parm (tree, void *); static int template_parm_this_level_p (tree, void *); static tree tsubst_friend_function (tree, tree); static tree tsubst_friend_class (tree, tree); static int can_complete_type_without_circularity (tree); static tree get_bindings (tree, tree, tree, bool); static int template_decl_level (tree); static int check_cv_quals_for_unify (int, tree, tree); static int unify_pack_expansion (tree, tree, tree, tree, unification_kind_t, bool, bool); static tree copy_template_args (tree); static tree tsubst_template_parms (tree, tree, tsubst_flags_t); tree most_specialized_partial_spec (tree, tsubst_flags_t); static tree tsubst_aggr_type (tree, tree, tsubst_flags_t, tree, int); static tree tsubst_arg_types (tree, tree, tree, tsubst_flags_t, tree); static tree tsubst_function_type (tree, tree, tsubst_flags_t, tree); static bool check_specialization_scope (void); static tree process_partial_specialization (tree); static void set_current_access_from_decl (tree); static enum template_base_result get_template_base (tree, tree, tree, tree, bool , tree *); static tree try_class_unification (tree, tree, tree, tree, bool); static bool class_nttp_const_wrapper_p (tree t); static int coerce_template_template_parms (tree, tree, tsubst_flags_t, tree, tree); static bool template_template_parm_bindings_ok_p (tree, tree); static void tsubst_default_arguments (tree, tsubst_flags_t); static tree for_each_template_parm_r (tree *, int *, void *); static tree copy_default_args_to_explicit_spec_1 (tree, tree); static void copy_default_args_to_explicit_spec (tree); static bool invalid_nontype_parm_type_p 
(tree, tsubst_flags_t); static bool dependent_template_arg_p (tree); static bool any_template_arguments_need_structural_equality_p (tree); static bool dependent_type_p_r (tree); static tree tsubst_copy (tree, tree, tsubst_flags_t, tree); static tree tsubst_decl (tree, tree, tsubst_flags_t); static void perform_instantiation_time_access_checks (tree, tree); static tree listify (tree); static tree listify_autos (tree, tree); static tree tsubst_template_parm (tree, tree, tsubst_flags_t); static tree instantiate_alias_template (tree, tree, tsubst_flags_t); static bool complex_alias_template_p (const_tree tmpl); static tree get_underlying_template (tree); static tree tsubst_attributes (tree, tree, tsubst_flags_t, tree); static tree canonicalize_expr_argument (tree, tsubst_flags_t); static tree make_argument_pack (tree); static void register_parameter_specializations (tree, tree); static tree enclosing_instantiation_of (tree tctx); static void instantiate_body (tree pattern, tree args, tree d, bool nested); /* Make the current scope suitable for access checking when we are processing T. T can be FUNCTION_DECL for instantiated function template, VAR_DECL for static member variable, or TYPE_DECL for alias template (needed by instantiate_decl). */ void push_access_scope (tree t) { gcc_assert (VAR_OR_FUNCTION_DECL_P (t) || TREE_CODE (t) == TYPE_DECL); if (DECL_FRIEND_CONTEXT (t)) push_nested_class (DECL_FRIEND_CONTEXT (t)); else if (DECL_CLASS_SCOPE_P (t)) push_nested_class (DECL_CONTEXT (t)); else push_to_top_level (); if (TREE_CODE (t) == FUNCTION_DECL) { vec_safe_push (saved_access_scope, current_function_decl); current_function_decl = t; } } /* Restore the scope set up by push_access_scope. T is the node we are processing. 
*/ void pop_access_scope (tree t) { if (TREE_CODE (t) == FUNCTION_DECL) current_function_decl = saved_access_scope->pop(); if (DECL_FRIEND_CONTEXT (t) || DECL_CLASS_SCOPE_P (t)) pop_nested_class (); else pop_from_top_level (); } /* Do any processing required when DECL (a member template declaration) is finished. Returns the TEMPLATE_DECL corresponding to DECL, unless it is a specialization, in which case the DECL itself is returned. */ tree finish_member_template_decl (tree decl) { if (decl == error_mark_node) return error_mark_node; gcc_assert (DECL_P (decl)); if (TREE_CODE (decl) == TYPE_DECL) { tree type; type = TREE_TYPE (decl); if (type == error_mark_node) return error_mark_node; if (MAYBE_CLASS_TYPE_P (type) && CLASSTYPE_TEMPLATE_INFO (type) && !CLASSTYPE_TEMPLATE_SPECIALIZATION (type)) { tree tmpl = CLASSTYPE_TI_TEMPLATE (type); check_member_template (tmpl); return tmpl; } return NULL_TREE; } else if (TREE_CODE (decl) == FIELD_DECL) error_at (DECL_SOURCE_LOCATION (decl), "data member %qD cannot be a member template", decl); else if (DECL_TEMPLATE_INFO (decl)) { if (!DECL_TEMPLATE_SPECIALIZATION (decl)) { check_member_template (DECL_TI_TEMPLATE (decl)); return DECL_TI_TEMPLATE (decl); } else return decl; } else error_at (DECL_SOURCE_LOCATION (decl), "invalid member template declaration %qD", decl); return error_mark_node; } /* Create a template info node. */ tree build_template_info (tree template_decl, tree template_args) { tree result = make_node (TEMPLATE_INFO); TI_TEMPLATE (result) = template_decl; TI_ARGS (result) = template_args; return result; } /* Return the template info node corresponding to T, whatever T is. 
*/ tree get_template_info (const_tree t) { tree tinfo = NULL_TREE; if (!t || t == error_mark_node) return NULL; if (TREE_CODE (t) == NAMESPACE_DECL || TREE_CODE (t) == PARM_DECL) return NULL; if (DECL_P (t) && DECL_LANG_SPECIFIC (t)) tinfo = DECL_TEMPLATE_INFO (t); if (!tinfo && DECL_IMPLICIT_TYPEDEF_P (t)) t = TREE_TYPE (t); if (OVERLOAD_TYPE_P (t)) tinfo = TYPE_TEMPLATE_INFO (t); else if (TREE_CODE (t) == BOUND_TEMPLATE_TEMPLATE_PARM) tinfo = TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (t); return tinfo; } /* Returns the template nesting level of the indicated class TYPE. For example, in: template <class T> struct A { template <class U> struct B {}; }; A<T>::B<U> has depth two, while A<T> has depth one. Both A<T>::B<int> and A<int>::B<U> have depth one, if they are instantiations, not specializations. This function is guaranteed to return 0 if passed NULL_TREE so that, for example, `template_class_depth (current_class_type)' is always safe. */ int template_class_depth (tree type) { int depth; for (depth = 0; type && TREE_CODE (type) != NAMESPACE_DECL; ) { tree tinfo = get_template_info (type); if (tinfo && PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo)) && uses_template_parms (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)))) ++depth; if (DECL_P (type)) { if (tree fctx = DECL_FRIEND_CONTEXT (type)) type = fctx; else type = CP_DECL_CONTEXT (type); } else if (LAMBDA_TYPE_P (type) && LAMBDA_TYPE_EXTRA_SCOPE (type)) type = LAMBDA_TYPE_EXTRA_SCOPE (type); else type = CP_TYPE_CONTEXT (type); } return depth; } /* Return TRUE if NODE instantiates a template that has arguments of its own, be it directly a primary template or indirectly through a partial specializations. 
*/ static bool instantiates_primary_template_p (tree node) { tree tinfo = get_template_info (node); if (!tinfo) return false; tree tmpl = TI_TEMPLATE (tinfo); if (PRIMARY_TEMPLATE_P (tmpl)) return true; if (!DECL_TEMPLATE_SPECIALIZATION (tmpl)) return false; /* So now we know we have a specialization, but it could be a full or a partial specialization. To tell which, compare the depth of its template arguments with those of its context. */ tree ctxt = DECL_CONTEXT (tmpl); tree ctinfo = get_template_info (ctxt); if (!ctinfo) return true; return (TMPL_ARGS_DEPTH (TI_ARGS (tinfo)) > TMPL_ARGS_DEPTH (TI_ARGS (ctinfo))); } /* Subroutine of maybe_begin_member_template_processing. Returns true if processing DECL needs us to push template parms. */ static bool inline_needs_template_parms (tree decl, bool nsdmi) { if (!decl || (!nsdmi && ! DECL_TEMPLATE_INFO (decl))) return false; return (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (most_general_template (decl))) > (processing_template_decl + DECL_TEMPLATE_SPECIALIZATION (decl))); } /* Subroutine of maybe_begin_member_template_processing. Push the template parms in PARMS, starting from LEVELS steps into the chain, and ending at the beginning, since template parms are listed innermost first. */ static void push_inline_template_parms_recursive (tree parmlist, int levels) { tree parms = TREE_VALUE (parmlist); int i; if (levels > 1) push_inline_template_parms_recursive (TREE_CHAIN (parmlist), levels - 1); ++processing_template_decl; current_template_parms = tree_cons (size_int (processing_template_decl), parms, current_template_parms); TEMPLATE_PARMS_FOR_INLINE (current_template_parms) = 1; begin_scope (TREE_VEC_LENGTH (parms) ? 
sk_template_parms : sk_template_spec, NULL); for (i = 0; i < TREE_VEC_LENGTH (parms); ++i) { tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i)); if (error_operand_p (parm)) continue; gcc_assert (DECL_P (parm)); switch (TREE_CODE (parm)) { case TYPE_DECL: case TEMPLATE_DECL: pushdecl (parm); break; case PARM_DECL: /* Push the CONST_DECL. */ pushdecl (TEMPLATE_PARM_DECL (DECL_INITIAL (parm))); break; default: gcc_unreachable (); } } } /* Restore the template parameter context for a member template, a friend template defined in a class definition, or a non-template member of template class. */ void maybe_begin_member_template_processing (tree decl) { tree parms; int levels = 0; bool nsdmi = TREE_CODE (decl) == FIELD_DECL; if (nsdmi) { tree ctx = DECL_CONTEXT (decl); decl = (CLASSTYPE_TEMPLATE_INFO (ctx) /* Disregard full specializations (c++/60999). */ && uses_template_parms (ctx) ? CLASSTYPE_TI_TEMPLATE (ctx) : NULL_TREE); } if (inline_needs_template_parms (decl, nsdmi)) { parms = DECL_TEMPLATE_PARMS (most_general_template (decl)); levels = TMPL_PARMS_DEPTH (parms) - processing_template_decl; if (DECL_TEMPLATE_SPECIALIZATION (decl)) { --levels; parms = TREE_CHAIN (parms); } push_inline_template_parms_recursive (parms, levels); } /* Remember how many levels of template parameters we pushed so that we can pop them later. */ inline_parm_levels.safe_push (levels); } /* Undo the effects of maybe_begin_member_template_processing. */ void maybe_end_member_template_processing (void) { int i; int last; if (inline_parm_levels.length () == 0) return; last = inline_parm_levels.pop (); for (i = 0; i < last; ++i) { --processing_template_decl; current_template_parms = TREE_CHAIN (current_template_parms); poplevel (0, 0, 0); } } /* Return a new template argument vector which contains all of ARGS, but has as its innermost set of arguments the EXTRA_ARGS. 
*/ static tree add_to_template_args (tree args, tree extra_args) { tree new_args; int extra_depth; int i; int j; if (args == NULL_TREE || extra_args == error_mark_node) return extra_args; extra_depth = TMPL_ARGS_DEPTH (extra_args); new_args = make_tree_vec (TMPL_ARGS_DEPTH (args) + extra_depth); for (i = 1; i <= TMPL_ARGS_DEPTH (args); ++i) SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (args, i)); for (j = 1; j <= extra_depth; ++j, ++i) SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (extra_args, j)); return new_args; } /* Like add_to_template_args, but only the outermost ARGS are added to the EXTRA_ARGS. In particular, all but TMPL_ARGS_DEPTH (EXTRA_ARGS) levels are added. This function is used to combine the template arguments from a partial instantiation with the template arguments used to attain the full instantiation from the partial instantiation. If ARGS is a TEMPLATE_DECL, use its parameters as args. */ tree add_outermost_template_args (tree args, tree extra_args) { tree new_args; if (!args) return extra_args; if (TREE_CODE (args) == TEMPLATE_DECL) { tree ti = get_template_info (DECL_TEMPLATE_RESULT (args)); args = TI_ARGS (ti); } /* If there are more levels of EXTRA_ARGS than there are ARGS, something very fishy is going on. */ gcc_assert (TMPL_ARGS_DEPTH (args) >= TMPL_ARGS_DEPTH (extra_args)); /* If *all* the new arguments will be the EXTRA_ARGS, just return them. */ if (TMPL_ARGS_DEPTH (args) == TMPL_ARGS_DEPTH (extra_args)) return extra_args; /* For the moment, we make ARGS look like it contains fewer levels. */ TREE_VEC_LENGTH (args) -= TMPL_ARGS_DEPTH (extra_args); new_args = add_to_template_args (args, extra_args); /* Now, we restore ARGS to its full dimensions. */ TREE_VEC_LENGTH (args) += TMPL_ARGS_DEPTH (extra_args); return new_args; } /* Return the N levels of innermost template arguments from the ARGS. 
*/

/* Argument levels are numbered from 1 (outermost) up to
   TMPL_ARGS_DEPTH (innermost); see the two returns below.  */

tree
get_innermost_template_args (tree args, int n)
{
  tree new_args;
  int extra_levels;
  int i;

  gcc_assert (n >= 0);

  /* If N is 1, just return the innermost set of template arguments.  */
  if (n == 1)
    return TMPL_ARGS_LEVEL (args, TMPL_ARGS_DEPTH (args));

  /* If we're not removing anything, just return the arguments we were
     given.  */
  extra_levels = TMPL_ARGS_DEPTH (args) - n;
  gcc_assert (extra_levels >= 0);
  if (extra_levels == 0)
    return args;

  /* Make a new set of arguments, not containing the outer arguments.  */
  new_args = make_tree_vec (n);
  for (i = 1; i <= n; ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i,
                         TMPL_ARGS_LEVEL (args, i + extra_levels));

  return new_args;
}

/* The inverse of get_innermost_template_args: Return all but the innermost
   EXTRA_LEVELS levels of template arguments from the ARGS.  */

static tree
strip_innermost_template_args (tree args, int extra_levels)
{
  tree new_args;
  int n = TMPL_ARGS_DEPTH (args) - extra_levels;
  int i;

  gcc_assert (n >= 0);

  /* If N is 1, just return the outermost set of template arguments.  */
  if (n == 1)
    return TMPL_ARGS_LEVEL (args, 1);

  /* If we're not removing anything, just return the arguments we were
     given.  */
  gcc_assert (extra_levels >= 0);
  if (extra_levels == 0)
    return args;

  /* Make a new set of arguments, not containing the inner arguments.  */
  new_args = make_tree_vec (n);
  for (i = 1; i <= n; ++i)
    SET_TMPL_ARGS_LEVEL (new_args, i, TMPL_ARGS_LEVEL (args, i));

  return new_args;
}

/* We've got a template header coming up; push to a new level for
   storing the parms.  */

void
begin_template_parm_list (void)
{
  /* We use a non-tag-transparent scope here, which causes pushtag to
     put tags in this scope, rather than in the enclosing class or
     namespace scope.  This is the right thing, since we want
     TEMPLATE_DECLS, and not TYPE_DECLS for template classes.  For a
     global template class, push_template_decl handles putting the
     TEMPLATE_DECL into top-level scope.  For a nested template class,
     e.g.:

       template <class T> struct S1 {
         template <class T> struct S2 {};
       };

     pushtag contains special code to insert the TEMPLATE_DECL for S2
     at the right scope.  */
  begin_scope (sk_template_parms, NULL);
  ++processing_template_decl;
  ++processing_template_parmlist;
  /* 0: this header is a plain template header, not a template <>
     specialization header.  */
  note_template_header (0);

  /* Add a dummy parameter level while we process the parameter list.  */
  current_template_parms
    = tree_cons (size_int (processing_template_decl),
                 make_tree_vec (0),
                 current_template_parms);
}

/* This routine is called when a specialization is declared.  If it is
   invalid to declare a specialization here, an error is reported and
   false is returned, otherwise this routine will return true.  */

static bool
check_specialization_scope (void)
{
  tree scope = current_scope ();

  /* [temp.expl.spec]

     An explicit specialization shall be declared in the namespace of
     which the template is a member, or, for member templates, in the
     namespace of which the enclosing class or enclosing class
     template is a member.  An explicit specialization of a member
     function, member class or static data member of a class template
     shall be declared in the namespace of which the class template is
     a member.  */
  if (scope && TREE_CODE (scope) != NAMESPACE_DECL)
    {
      error ("explicit specialization in non-namespace scope %qD", scope);
      return false;
    }

  /* [temp.expl.spec]

     In an explicit specialization declaration for a member of a class
     template or a member template that appears in namespace scope,
     the member template and some of its enclosing class templates may
     remain unspecialized, except that the declaration shall not
     explicitly specialize a class member template if its enclosing
     class templates are not explicitly specialized as well.  */
  if (current_template_parms)
    {
      error ("enclosing class templates are not explicitly specialized");
      return false;
    }

  return true;
}

/* We've just seen template <>.
*/

bool
begin_specialization (void)
{
  begin_scope (sk_template_spec, NULL);
  note_template_header (1);
  return check_specialization_scope ();
}

/* Called at the end of processing a declaration preceded by
   template<>.  */

void
end_specialization (void)
{
  finish_scope ();
  reset_specialization ();
}

/* Any template <>'s that we have seen thus far are not referring to a
   function specialization.  */

void
reset_specialization (void)
{
  processing_specialization = 0;
  template_header_count = 0;
}

/* We've just seen a template header.  If SPECIALIZATION is nonzero,
   it was of the form template <>.  */

static void
note_template_header (int specialization)
{
  processing_specialization = specialization;
  template_header_count++;
}

/* We're beginning an explicit instantiation.  */

void
begin_explicit_instantiation (void)
{
  gcc_assert (!processing_explicit_instantiation);
  processing_explicit_instantiation = true;
}

void
end_explicit_instantiation (void)
{
  gcc_assert (processing_explicit_instantiation);
  processing_explicit_instantiation = false;
}

/* An explicit specialization or partial specialization of TMPL is being
   declared.  Check that the namespace in which the specialization is
   occurring is permissible.  Returns false iff it is invalid to
   specialize TMPL in the current namespace.  */

static bool
check_specialization_namespace (tree tmpl)
{
  tree tpl_ns = decl_namespace_context (tmpl);

  /* [tmpl.expl.spec]

     An explicit specialization shall be declared in a namespace enclosing the
     specialized template.  An explicit specialization whose declarator-id is
     not qualified shall be declared in the nearest enclosing namespace of the
     template, or, if the namespace is inline (7.3.1), any namespace from its
     enclosing namespace set.  */
  if (current_scope() != DECL_CONTEXT (tmpl)
      && !at_namespace_scope_p ())
    {
      error ("specialization of %qD must appear at namespace scope", tmpl);
      return false;
    }

  if (is_nested_namespace (current_namespace, tpl_ns, cxx_dialect < cxx11))
    /* Same or enclosing namespace.  */
    return true;
  else
    {
      auto_diagnostic_group d;
      if (permerror (input_location,
                     "specialization of %qD in different namespace", tmpl))
        inform (DECL_SOURCE_LOCATION (tmpl),
                " from definition of %q#D", tmpl);
      return false;
    }
}

/* SPEC is an explicit instantiation.  Check that it is valid to
   perform this explicit instantiation in the current namespace.  */

static void
check_explicit_instantiation_namespace (tree spec)
{
  tree ns;

  /* DR 275: An explicit instantiation shall appear in an enclosing
     namespace of its template.  */
  ns = decl_namespace_context (spec);
  if (!is_nested_namespace (current_namespace, ns))
    permerror (input_location,
               "explicit instantiation of %qD in namespace %qD "
               "(which does not enclose namespace %qD)",
               spec, current_namespace, ns);
}

/* Returns the type of a template specialization only if that
   specialization needs to be defined.  Otherwise (e.g., if the type has
   already been defined), the function returns NULL_TREE.  */

static tree
maybe_new_partial_specialization (tree type)
{
  /* An implicit instantiation of an incomplete type implies
     the definition of a new class template.

        template<typename T>
          struct S;

        template<typename T>
          struct S<T*>;

     Here, S<T*> is an implicit instantiation of S whose type
     is incomplete.  */
  if (CLASSTYPE_IMPLICIT_INSTANTIATION (type) && !COMPLETE_TYPE_P (type))
    return type;

  /* It can also be the case that TYPE is a completed specialization.
     Continuing the previous example, suppose we also declare:

        template<typename T>
          requires Integral<T>
            struct S<T*>;

     Here, S<T*> refers to the specialization S<T*> defined
     above.  However, we need to differentiate definitions because
     we intend to define a new partial specialization.  In this case,
     we rely on the fact that the constraints are different for
     this declaration than that above.

     Note that we also get here for injected class names and
     late-parsed template definitions.  We must ensure that we
     do not create new type declarations for those cases.  */
  if (flag_concepts && CLASSTYPE_TEMPLATE_SPECIALIZATION (type))
    {
      tree tmpl = CLASSTYPE_TI_TEMPLATE (type);
      tree args = CLASSTYPE_TI_ARGS (type);

      /* If there are no template parameters, this cannot be a new
         partial template specialization?  */
      if (!current_template_parms)
        return NULL_TREE;

      /* The injected-class-name is not a new partial specialization.  */
      if (DECL_SELF_REFERENCE_P (TYPE_NAME (type)))
        return NULL_TREE;

      /* If the constraints are not the same as those of the primary
         then, we can probably create a new specialization.  */
      tree type_constr = current_template_constraints ();

      if (type == TREE_TYPE (tmpl))
        {
          tree main_constr = get_constraints (tmpl);
          if (equivalent_constraints (type_constr, main_constr))
            return NULL_TREE;
        }

      /* Also, if there's a pre-existing specialization with matching
         constraints, then this also isn't new.  */
      tree specs = DECL_TEMPLATE_SPECIALIZATIONS (tmpl);
      while (specs)
        {
          tree spec_tmpl = TREE_VALUE (specs);
          tree spec_args = TREE_PURPOSE (specs);
          tree spec_constr = get_constraints (spec_tmpl);
          if (comp_template_args (args, spec_args)
              && equivalent_constraints (type_constr, spec_constr))
            return NULL_TREE;
          specs = TREE_CHAIN (specs);
        }

      /* Create a new type node (and corresponding type decl)
         for the newly declared specialization.  */
      tree t = make_class_type (TREE_CODE (type));
      CLASSTYPE_DECLARED_CLASS (t) = CLASSTYPE_DECLARED_CLASS (type);
      SET_TYPE_TEMPLATE_INFO (t, build_template_info (tmpl, args));

      /* We only need a separate type node for storing the definition of this
         partial specialization; uses of S<T*> are unconstrained, so all are
         equivalent.  So keep TYPE_CANONICAL the same.  */
      TYPE_CANONICAL (t) = TYPE_CANONICAL (type);

      /* Build the corresponding type decl.  D is not returned or stored
         here; presumably create_implicit_typedef links it to T
         (e.g. as TYPE_NAME) -- NOTE(review): confirm.  */
      tree d = create_implicit_typedef (DECL_NAME (tmpl), t);
      DECL_CONTEXT (d) = TYPE_CONTEXT (t);
      DECL_SOURCE_LOCATION (d) = input_location;
      TREE_PRIVATE (d) = (current_access_specifier == access_private_node);
      TREE_PROTECTED (d) = (current_access_specifier == access_protected_node);

      return t;
    }

  return NULL_TREE;
}

/* The TYPE is being declared.  If it is a template type, that means it
   is a partial specialization.  Do appropriate error-checking.  */

tree
maybe_process_partial_specialization (tree type)
{
  tree context;

  if (type == error_mark_node)
    return error_mark_node;

  /* A lambda that appears in specialization context is not itself a
     specialization.  */
  if (CLASS_TYPE_P (type) && CLASSTYPE_LAMBDA_EXPR (type))
    return type;

  if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    {
      error ("name of class shadows template template parameter %qD",
             TYPE_NAME (type));
      return error_mark_node;
    }

  context = TYPE_CONTEXT (type);

  if (TYPE_ALIAS_P (type))
    {
      tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (type);

      if (tinfo && DECL_ALIAS_TEMPLATE_P (TI_TEMPLATE (tinfo)))
        error ("specialization of alias template %qD",
               TI_TEMPLATE (tinfo));
      else
        error ("explicit specialization of non-template %qT", type);
      return error_mark_node;
    }
  else if (CLASS_TYPE_P (type) && CLASSTYPE_USE_TEMPLATE (type))
    {
      /* This is for ordinary explicit specialization and partial
         specialization of a template class such as:

           template <> class C<int>;

         or:

           template <class T> class C<T*>;

         Make sure that `C<int>' and `C<T*>' are implicit instantiations.  */

      if (tree t = maybe_new_partial_specialization (type))
        {
          if (!check_specialization_namespace (CLASSTYPE_TI_TEMPLATE (t))
              && !at_namespace_scope_p ())
            return error_mark_node;
          SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (t);
          DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (t)) = input_location;
          if (processing_template_decl)
            {
              tree decl = push_template_decl (TYPE_MAIN_DECL (t));
              if (decl == error_mark_node)
                return error_mark_node;
              return TREE_TYPE (decl);
            }
        }
      else if (CLASSTYPE_TEMPLATE_INSTANTIATION (type))
        error ("specialization of %qT after instantiation", type);
      else if (errorcount && !processing_specialization
               && CLASSTYPE_TEMPLATE_SPECIALIZATION (type)
               && !uses_template_parms (CLASSTYPE_TI_ARGS (type)))
        /* Trying to define a specialization either without a template<>
           header or in an inappropriate place.  We've already given an
           error, so just bail now so we don't actually define the
           specialization.  */
        return error_mark_node;
    }
  else if (CLASS_TYPE_P (type)
           && !CLASSTYPE_USE_TEMPLATE (type)
           && CLASSTYPE_TEMPLATE_INFO (type)
           && context && CLASS_TYPE_P (context)
           && CLASSTYPE_TEMPLATE_INFO (context))
    {
      /* This is for an explicit specialization of member class
         template according to [temp.expl.spec/18]:

           template <> template <class U> class C<int>::D;

         The context `C<int>' must be an implicit instantiation.
         Otherwise this is just a member class template declared
         earlier like:

           template <> class C<int> { template <class U> class D; };
           template <> template <class U> class C<int>::D;

         In the first case, `C<int>::D' is a specialization of `C<T>::D'
         while in the second case, `C<int>::D' is a primary template
         and `C<T>::D' may not exist.  */

      if (CLASSTYPE_IMPLICIT_INSTANTIATION (context)
          && !COMPLETE_TYPE_P (type))
        {
          tree t;
          tree tmpl = CLASSTYPE_TI_TEMPLATE (type);

          if (current_namespace
              != decl_namespace_context (tmpl))
            {
              if (permerror (input_location,
                             "specialization of %qD in different namespace",
                             type))
                inform (DECL_SOURCE_LOCATION (tmpl),
                        "from definition of %q#D", tmpl);
            }

          /* Check for invalid specialization after instantiation:

               template <> template <> class C<int>::D<int>;
               template <> template <class U> class C<int>::D;  */

          for (t = DECL_TEMPLATE_INSTANTIATIONS (tmpl);
               t; t = TREE_CHAIN (t))
            {
              tree inst = TREE_VALUE (t);
              if (CLASSTYPE_TEMPLATE_SPECIALIZATION (inst)
                  || !COMPLETE_OR_OPEN_TYPE_P (inst))
                {
                  /* We already have a full specialization of this partial
                     instantiation, or a full specialization has been
                     looked up but not instantiated.  Reassign it to the
                     new member specialization template.  */
                  spec_entry elt;
                  spec_entry *entry;

                  elt.tmpl = most_general_template (tmpl);
                  elt.args = CLASSTYPE_TI_ARGS (inst);
                  elt.spec = inst;

                  type_specializations->remove_elt (&elt);

                  /* Re-key the entry under the member template with
                     only the innermost argument level.  */
                  elt.tmpl = tmpl;
                  CLASSTYPE_TI_ARGS (inst)
                    = elt.args = INNERMOST_TEMPLATE_ARGS (elt.args);

                  spec_entry **slot
                    = type_specializations->find_slot (&elt, INSERT);
                  entry = ggc_alloc<spec_entry> ();
                  *entry = elt;
                  *slot = entry;
                }
              else
                /* But if we've had an implicit instantiation, that's a
                   problem ([temp.expl.spec]/6).  */
                error ("specialization %qT after instantiation %qT",
                       type, inst);
            }

          /* Mark TYPE as a specialization.  And as a result, we only
             have one level of template argument for the innermost
             class template.  */
          SET_CLASSTYPE_TEMPLATE_SPECIALIZATION (type);
          DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)) = input_location;
          CLASSTYPE_TI_ARGS (type)
            = INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type));
        }
    }
  else if (processing_specialization)
    {
      /* Someday C++0x may allow for enum template specialization.  */
      if (cxx_dialect > cxx98 && TREE_CODE (type) == ENUMERAL_TYPE
          && CLASS_TYPE_P (context) && CLASSTYPE_USE_TEMPLATE (context))
        pedwarn (input_location, OPT_Wpedantic, "template specialization "
                 "of %qD not allowed by ISO C++", type);
      else
        {
          error ("explicit specialization of non-template %qT", type);
          return error_mark_node;
        }
    }

  return type;
}

/* Returns nonzero if we can optimize the retrieval of specializations
   for TMPL, a TEMPLATE_DECL.  In particular, for such a template, we
   do not use DECL_TEMPLATE_SPECIALIZATIONS at all.  */

static inline bool
optimize_specialization_lookup_p (tree tmpl)
{
  return (DECL_FUNCTION_TEMPLATE_P (tmpl)
          && DECL_CLASS_SCOPE_P (tmpl)
          /* DECL_CLASS_SCOPE_P holds of T::f even if T is a template
             parameter.  */
          && CLASS_TYPE_P (DECL_CONTEXT (tmpl))
          /* The optimized lookup depends on the fact that the
             template arguments for the member function template apply
             purely to the containing class, which is not true if the
             containing class is an explicit or partial
             specialization.  */
          && !CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (tmpl))
          && !DECL_MEMBER_TEMPLATE_P (tmpl)
          && !DECL_CONV_FN_P (tmpl)
          /* It is possible to have a template that is not a member
             template and is not a member of a template class:

             template <typename T>
             struct S { friend A::f(); };

             Here, the friend function is a template, but the context does
             not have template information.  The optimized lookup relies
             on having ARGS be the template arguments for both the class
             and the function template.  */
          && !DECL_UNIQUE_FRIEND_P (DECL_TEMPLATE_RESULT (tmpl)));
}

/* Make sure ARGS doesn't use any inappropriate typedefs; we should have
   gone through coerce_template_parms by now.
*/

static void
verify_unstripped_args_1 (tree inner)
{
  for (int i = 0; i < TREE_VEC_LENGTH (inner); ++i)
    {
      tree arg = TREE_VEC_ELT (inner, i);
      if (TREE_CODE (arg) == TEMPLATE_DECL)
        /* OK */;
      else if (TYPE_P (arg))
        gcc_assert (strip_typedefs (arg, NULL) == arg);
      else if (ARGUMENT_PACK_P (arg))
        /* Recurse into the pack's own argument vector.  */
        verify_unstripped_args_1 (ARGUMENT_PACK_ARGS (arg));
      else if (strip_typedefs (TREE_TYPE (arg), NULL) != TREE_TYPE (arg))
        /* Allow typedefs on the type of a non-type argument, since a
           parameter can have them.  */;
      else
        gcc_assert (strip_typedefs_expr (arg, NULL) == arg);
    }
}

static void
verify_unstripped_args (tree args)
{
  /* Temporarily act as if in a template so the dependency check below
     behaves as it would during substitution.  */
  ++processing_template_decl;
  if (!any_dependent_template_arguments_p (args))
    verify_unstripped_args_1 (INNERMOST_TEMPLATE_ARGS (args));
  --processing_template_decl;
}

/* Retrieve the specialization (in the sense of [temp.spec] - a
   specialization is either an instantiation or an explicit
   specialization) of TMPL for the given template ARGS.  If there is
   no such specialization, return NULL_TREE.  The ARGS are a vector of
   arguments, or a vector of vectors of arguments, in the case of
   templates with more than one level of parameters.

   HASH is the precomputed spec_hasher hash for (TMPL, ARGS), or 0 to
   have it computed here.

   If TMPL is a type template and CLASS_SPECIALIZATIONS_P is true,
   then we search for a partial specialization matching ARGS.  This
   parameter is ignored if TMPL is not a class template.

   We can also look up a FIELD_DECL, if it is a lambda capture pack; the
   result is a NONTYPE_ARGUMENT_PACK.  */

static tree
retrieve_specialization (tree tmpl, tree args, hashval_t hash)
{
  if (tmpl == NULL_TREE)
    return NULL_TREE;

  if (args == error_mark_node)
    return NULL_TREE;

  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL
              || TREE_CODE (tmpl) == FIELD_DECL);

  /* There should be as many levels of arguments as there are
     levels of parameters.  */
  gcc_assert (TMPL_ARGS_DEPTH (args)
              == (TREE_CODE (tmpl) == TEMPLATE_DECL
                  ? TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl))
                  : template_class_depth (DECL_CONTEXT (tmpl))));

  if (flag_checking)
    verify_unstripped_args (args);

  /* Lambda functions in templates aren't instantiated normally, but through
     tsubst_lambda_expr.  */
  if (lambda_fn_in_template_p (tmpl))
    return NULL_TREE;

  if (optimize_specialization_lookup_p (tmpl))
    {
      /* The template arguments actually apply to the containing
         class.  Find the class specialization with those
         arguments.  */
      tree class_template = CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (tmpl));
      tree class_specialization
        = retrieve_specialization (class_template, args, 0);
      if (!class_specialization)
        return NULL_TREE;

      /* Find the instance of TMPL.  */
      tree fns = get_class_binding (class_specialization, DECL_NAME (tmpl));
      for (ovl_iterator iter (fns); iter; ++iter)
        {
          tree fn = *iter;
          if (tree ti = get_template_info (fn))
            if (TI_TEMPLATE (ti) == tmpl
                /* using-declarations can bring in a different
                   instantiation of tmpl as a member of a different
                   instantiation of tmpl's class.  We don't want those
                   here.  */
                && DECL_CONTEXT (fn) == class_specialization)
              return fn;
        }
      return NULL_TREE;
    }
  else
    {
      spec_entry *found;
      spec_entry elt;
      spec_hash_table *specializations;

      elt.tmpl = tmpl;
      elt.args = args;
      elt.spec = NULL_TREE;

      if (DECL_CLASS_TEMPLATE_P (tmpl))
        specializations = type_specializations;
      else
        specializations = decl_specializations;

      if (hash == 0)
        hash = spec_hasher::hash (&elt);
      found = specializations->find_with_hash (&elt, hash);
      if (found)
        return found->spec;
    }

  return NULL_TREE;
}

/* Like retrieve_specialization, but for local declarations.  */

tree
retrieve_local_specialization (tree tmpl)
{
  if (local_specializations == NULL)
    return NULL_TREE;

  tree *slot = local_specializations->get (tmpl);
  return slot ? *slot : NULL_TREE;
}

/* Returns nonzero iff DECL is a specialization of TMPL.  */

int
is_specialization_of (tree decl, tree tmpl)
{
  tree t;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Walk the chain of instantiating templates via DECL_TI_TEMPLATE.  */
      for (t = decl;
           t != NULL_TREE;
           t = DECL_TEMPLATE_INFO (t) ? DECL_TI_TEMPLATE (t) : NULL_TREE)
        if (t == tmpl)
          return 1;
    }
  else
    {
      gcc_assert (TREE_CODE (decl) == TYPE_DECL);

      for (t = TREE_TYPE (decl);
           t != NULL_TREE;
           t = CLASSTYPE_USE_TEMPLATE (t)
             ? TREE_TYPE (CLASSTYPE_TI_TEMPLATE (t)) : NULL_TREE)
        if (same_type_ignoring_top_level_qualifiers_p (t, TREE_TYPE (tmpl)))
          return 1;
    }

  return 0;
}

/* Returns nonzero iff DECL is a specialization of friend declaration
   FRIEND_DECL according to [temp.friend].  */

bool
is_specialization_of_friend (tree decl, tree friend_decl)
{
  bool need_template = true;
  int template_depth;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL
              || TREE_CODE (decl) == TYPE_DECL);

  /* For [temp.friend/6] when FRIEND_DECL is an ordinary member function
     of a template class, we want to check if DECL is a specialization
     of this.  */
  if (TREE_CODE (friend_decl) == FUNCTION_DECL
      && DECL_TEMPLATE_INFO (friend_decl)
      && !DECL_USE_TEMPLATE (friend_decl))
    {
      /* We want a TEMPLATE_DECL for `is_specialization_of'.  */
      friend_decl = DECL_TI_TEMPLATE (friend_decl);
      need_template = false;
    }
  else if (TREE_CODE (friend_decl) == TEMPLATE_DECL
           && !PRIMARY_TEMPLATE_P (friend_decl))
    need_template = false;

  /* There is nothing to do if this is not a template friend.  */
  if (TREE_CODE (friend_decl) != TEMPLATE_DECL)
    return false;

  if (is_specialization_of (decl, friend_decl))
    return true;

  /* [temp.friend/6]

     A member of a class template may be declared to be a friend of a
     non-template class.  In this case, the corresponding member of
     every specialization of the class template is a friend of the
     class granting friendship.

     For example, given a template friend declaration

       template <class T> friend void A<T>::f();

     the member function below is considered a friend

       template <> struct A<int> {
         void f();
       };

     For this type of template friend, TEMPLATE_DEPTH below will be
     nonzero.  To determine if DECL is a friend of FRIEND, we first
     check if the enclosing class is a specialization of another.  */

  template_depth = template_class_depth (CP_DECL_CONTEXT (friend_decl));
  if (template_depth
      && DECL_CLASS_SCOPE_P (decl)
      && is_specialization_of (TYPE_NAME (DECL_CONTEXT (decl)),
                               CLASSTYPE_TI_TEMPLATE (DECL_CONTEXT (friend_decl))))
    {
      /* Next, we check the members themselves.  In order to handle
         a few tricky cases, such as when FRIEND_DECL's are

           template <class T> friend void A<T>::g(T t);
           template <class T> template <T t> friend void A<T>::h();

         and DECL's are

           void A<int>::g(int);
           template <int> void A<int>::h();

         we need to figure out ARGS, the template arguments from
         the context of DECL.  This is required for template substitution
         of `T' in the function parameter of `g' and template parameter
         of `h' in the above examples.  Here ARGS corresponds to `int'.  */

      tree context = DECL_CONTEXT (decl);
      tree args = NULL_TREE;
      int current_depth = 0;

      /* Accumulate argument levels, outermost first, from DECL's
         enclosing class contexts.  */
      while (current_depth < template_depth)
        {
          if (CLASSTYPE_TEMPLATE_INFO (context))
            {
              if (current_depth == 0)
                args = TYPE_TI_ARGS (context);
              else
                args = add_to_template_args (TYPE_TI_ARGS (context), args);
              current_depth++;
            }
          context = TYPE_CONTEXT (context);
        }

      if (TREE_CODE (decl) == FUNCTION_DECL)
        {
          bool is_template;
          tree friend_type;
          tree decl_type;
          tree friend_args_type;
          tree decl_args_type;

          /* Make sure that both DECL and FRIEND_DECL are templates or
             non-templates.  */
          is_template = DECL_TEMPLATE_INFO (decl)
                        && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl));
          if (need_template ^ is_template)
            return false;
          else if (is_template)
            {
              /* If both are templates, check template parameter list.  */
              tree friend_parms
                = tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl),
                                         args, tf_none);
              if (!comp_template_parms
                     (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (decl)),
                      friend_parms))
                return false;

              decl_type = TREE_TYPE (DECL_TI_TEMPLATE (decl));
            }
          else
            decl_type = TREE_TYPE (decl);

          friend_type = tsubst_function_type (TREE_TYPE (friend_decl), args,
                                              tf_none, NULL_TREE);
          if (friend_type == error_mark_node)
            return false;

          /* Check if return types match.  */
          if (!same_type_p (TREE_TYPE (decl_type), TREE_TYPE (friend_type)))
            return false;

          /* Check if function parameter types match, ignoring the
             `this' parameter.  */
          friend_args_type = TYPE_ARG_TYPES (friend_type);
          decl_args_type = TYPE_ARG_TYPES (decl_type);
          if (DECL_NONSTATIC_MEMBER_FUNCTION_P (friend_decl))
            friend_args_type = TREE_CHAIN (friend_args_type);
          if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
            decl_args_type = TREE_CHAIN (decl_args_type);

          return compparms (decl_args_type, friend_args_type);
        }
      else
        {
          /* DECL is a TYPE_DECL */
          bool is_template;
          tree decl_type = TREE_TYPE (decl);

          /* Make sure that both DECL and FRIEND_DECL are templates or
             non-templates.  */
          is_template
            = CLASSTYPE_TEMPLATE_INFO (decl_type)
              && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (decl_type));

          if (need_template ^ is_template)
            return false;
          else if (is_template)
            {
              tree friend_parms;
              /* If both are templates, check the name of the two
                 TEMPLATE_DECL's first because is_friend didn't.  */
              if (DECL_NAME (CLASSTYPE_TI_TEMPLATE (decl_type))
                  != DECL_NAME (friend_decl))
                return false;

              /* Now check template parameter list.  */
              friend_parms
                = tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_decl),
                                         args, tf_none);
              return comp_template_parms
                       (DECL_TEMPLATE_PARMS (CLASSTYPE_TI_TEMPLATE (decl_type)),
                        friend_parms);
            }
          else
            return (DECL_NAME (decl) == DECL_NAME (friend_decl));
        }
    }

  return false;
}

/* Register the specialization SPEC as a specialization of TMPL with
   the indicated ARGS.  IS_FRIEND indicates whether the specialization
   is actually just a friend declaration.  ATTRLIST is the list of
   attributes that the specialization is declared with or NULL when it
   isn't.  Returns SPEC, or an equivalent prior declaration, if
   available.

   We also store instantiations of field packs in the hash table, even
   though they are not themselves templates, to make lookup easier.
*/

/* HASH is the precomputed spec_hasher hash for (TMPL, ARGS), or 0 to
   have it computed here.  */

static tree
register_specialization (tree spec, tree tmpl, tree args, bool is_friend,
                         hashval_t hash)
{
  tree fn;
  spec_entry **slot = NULL;
  /* ELT is only initialized on the non-optimized path below; SLOT is
     non-null exactly when it was.  */
  spec_entry elt;

  gcc_assert ((TREE_CODE (tmpl) == TEMPLATE_DECL && DECL_P (spec))
              || (TREE_CODE (tmpl) == FIELD_DECL
                  && TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK));

  if (TREE_CODE (spec) == FUNCTION_DECL
      && uses_template_parms (DECL_TI_ARGS (spec)))
    /* This is the FUNCTION_DECL for a partial instantiation.  Don't
       register it; we want the corresponding TEMPLATE_DECL instead.
       We use `uses_template_parms (DECL_TI_ARGS (spec))' rather than
       the more obvious `uses_template_parms (spec)' to avoid problems
       with default function arguments.  In particular, given
       something like this:

          template <class T> void f(T t1, T t = T())

       the default argument expression is not substituted for in an
       instantiation unless and until it is actually needed.  */
    return spec;

  if (optimize_specialization_lookup_p (tmpl))
    /* We don't put these specializations in the hash table, but we might
       want to give an error about a mismatch.  */
    fn = retrieve_specialization (tmpl, args, 0);
  else
    {
      elt.tmpl = tmpl;
      elt.args = args;
      elt.spec = spec;

      if (hash == 0)
        hash = spec_hasher::hash (&elt);

      slot = decl_specializations->find_slot_with_hash (&elt, hash, INSERT);
      if (*slot)
        fn = (*slot)->spec;
      else
        fn = NULL_TREE;
    }

  /* We can sometimes try to re-register a specialization that we've
     already got.  In particular, regenerate_decl_from_template calls
     duplicate_decls which will update the specialization list.  But,
     we'll still get called again here anyhow.  It's more convenient
     to simply allow this than to try to prevent it.  */
  if (fn == spec)
    return spec;
  else if (fn && DECL_TEMPLATE_SPECIALIZATION (spec))
    {
      if (DECL_TEMPLATE_INSTANTIATION (fn))
        {
          if (DECL_ODR_USED (fn)
              || DECL_EXPLICIT_INSTANTIATION (fn))
            {
              error ("specialization of %qD after instantiation",
                     fn);
              return error_mark_node;
            }
          else
            {
              tree clone;
              /* This situation should occur only if the first
                 specialization is an implicit instantiation, the
                 second is an explicit specialization, and the
                 implicit instantiation has not yet been used.  That
                 situation can occur if we have implicitly
                 instantiated a member function and then specialized
                 it later.

                 We can also wind up here if a friend declaration that
                 looked like an instantiation turns out to be a
                 specialization:

                   template <class T> void foo(T);
                   class S { friend void foo<>(int) };
                   template <> void foo(int);

                 We transform the existing DECL in place so that any
                 pointers to it become pointers to the updated
                 declaration.

                 If there was a definition for the template, but not
                 for the specialization, we want this to look as if
                 there were no definition, and vice versa.  */
              DECL_INITIAL (fn) = NULL_TREE;
              duplicate_decls (spec, fn, /*hiding=*/is_friend);
              /* The call to duplicate_decls will have applied
                 [temp.expl.spec]:

                   An explicit specialization of a function template
                   is inline only if it is explicitly declared to be,
                   and independently of whether its function template
                   is.

                 to the primary function; now copy the inline bits to
                 the various clones.  */
              FOR_EACH_CLONE (clone, fn)
                {
                  DECL_DECLARED_INLINE_P (clone)
                    = DECL_DECLARED_INLINE_P (fn);
                  DECL_SOURCE_LOCATION (clone)
                    = DECL_SOURCE_LOCATION (fn);
                  DECL_DELETED_FN (clone)
                    = DECL_DELETED_FN (fn);
                }
              check_specialization_namespace (tmpl);

              return fn;
            }
        }
      else if (DECL_TEMPLATE_SPECIALIZATION (fn))
        {
          tree dd = duplicate_decls (spec, fn, /*hiding=*/is_friend);
          if (dd == error_mark_node)
            /* We've already complained in duplicate_decls.  */
            return error_mark_node;

          if (dd == NULL_TREE && DECL_INITIAL (spec))
            /* Dup decl failed, but this is a new definition.  Set the
               line number so any errors match this new
               definition.  */
            DECL_SOURCE_LOCATION (fn) = DECL_SOURCE_LOCATION (spec);

          return fn;
        }
    }
  else if (fn)
    return duplicate_decls (spec, fn, /*hiding=*/is_friend);

  /* A specialization must be declared in the same namespace as the
     template it is specializing.  */
  if (DECL_P (spec) && DECL_TEMPLATE_SPECIALIZATION (spec)
      && !check_specialization_namespace (tmpl))
    DECL_CONTEXT (spec) = DECL_CONTEXT (tmpl);

  if (slot != NULL /* !optimize_specialization_lookup_p (tmpl) */)
    {
      spec_entry *entry = ggc_alloc<spec_entry> ();
      gcc_assert (tmpl && args && spec);
      *entry = elt;
      *slot = entry;
      if ((TREE_CODE (spec) == FUNCTION_DECL && DECL_NAMESPACE_SCOPE_P (spec)
           && PRIMARY_TEMPLATE_P (tmpl)
           && DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (tmpl)) == NULL_TREE)
          || variable_template_p (tmpl))
        /* If TMPL is a forward declaration of a template function, keep a list
           of all specializations in case we need to reassign them to a friend
           template later in tsubst_friend_function.

           Also keep a list of all variable template instantiations so that
           process_partial_specialization can check whether a later partial
           specialization would have used it.  */
        DECL_TEMPLATE_INSTANTIATIONS (tmpl)
          = tree_cons (args, spec, DECL_TEMPLATE_INSTANTIATIONS (tmpl));
    }

  return spec;
}

/* Returns true iff two spec_entry nodes are equivalent.  */

int comparing_specializations;

/* Entries that compare equal here must hash identically via
   spec_hasher::hash, which looks only at TMPL and ARGS.  */

bool
spec_hasher::equal (spec_entry *e1, spec_entry *e2)
{
  int equal;

  ++comparing_specializations;
  equal = (e1->tmpl == e2->tmpl
           && comp_template_args (e1->args, e2->args));
  if (equal && flag_concepts
      /* tmpl could be a FIELD_DECL for a capture pack.  */
      && TREE_CODE (e1->tmpl) == TEMPLATE_DECL
      && VAR_P (DECL_TEMPLATE_RESULT (e1->tmpl))
      && uses_template_parms (e1->args))
    {
      /* Partial specializations of a variable template can be distinguished by
         constraints.  */
      tree c1 = e1->spec ? get_constraints (e1->spec) : NULL_TREE;
      tree c2 = e2->spec ? get_constraints (e2->spec) : NULL_TREE;
      equal = equivalent_constraints (c1, c2);
    }
  --comparing_specializations;

  return equal;
}

/* Returns a hash for a template TMPL and template arguments ARGS.  */

static hashval_t
hash_tmpl_and_args (tree tmpl, tree args)
{
  hashval_t val = iterative_hash_object (DECL_UID (tmpl), 0);
  return iterative_hash_template_arg (args, val);
}

/* Returns a hash for a spec_entry node based on the TMPL and ARGS members,
   ignoring SPEC.  */

hashval_t
spec_hasher::hash (spec_entry *e)
{
  return hash_tmpl_and_args (e->tmpl, e->args);
}

/* Recursively calculate a hash value for a template argument ARG, for use
   in the hash tables of template specializations.  We must be careful to
   (at least) skip the same entities template_args_equal does.  */

hashval_t
iterative_hash_template_arg (tree arg, hashval_t val)
{
  if (arg == NULL_TREE)
    return iterative_hash_object (arg, val);

  if (!TYPE_P (arg))
    /* Strip nop-like things, but not the same as STRIP_NOPS.  */
    while (CONVERT_EXPR_P (arg)
           || TREE_CODE (arg) == NON_LVALUE_EXPR
           || class_nttp_const_wrapper_p (arg))
      arg = TREE_OPERAND (arg, 0);

  enum tree_code code = TREE_CODE (arg);

  val = iterative_hash_object (code, val);

  switch (code)
    {
    case ARGUMENT_PACK_SELECT:
      gcc_unreachable ();

    case ERROR_MARK:
      return val;

    case IDENTIFIER_NODE:
      return iterative_hash_object (IDENTIFIER_HASH_VALUE (arg), val);

    case TREE_VEC:
      for (int i = 0, len = TREE_VEC_LENGTH (arg); i < len; ++i)
        val = iterative_hash_template_arg (TREE_VEC_ELT (arg, i), val);
      return val;

    case TYPE_PACK_EXPANSION:
    case EXPR_PACK_EXPANSION:
      val = iterative_hash_template_arg (PACK_EXPANSION_PATTERN (arg), val);
      return iterative_hash_template_arg (PACK_EXPANSION_EXTRA_ARGS (arg), val);

    case TYPE_ARGUMENT_PACK:
    case NONTYPE_ARGUMENT_PACK:
      return iterative_hash_template_arg (ARGUMENT_PACK_ARGS (arg), val);

    case TREE_LIST:
      for (; arg; arg = TREE_CHAIN (arg))
        val = iterative_hash_template_arg (TREE_VALUE (arg), val);
      return val;

    case OVERLOAD:
      for (lkp_iterator iter (arg); iter; ++iter)
        val = iterative_hash_template_arg (*iter, val);
      return val;

    case CONSTRUCTOR:
      {
        tree field, value;
        unsigned i;
        /* NOTE(review): the hash of TREE_TYPE is computed but its result
           is discarded (VAL is not updated here) -- confirm whether that
           is intentional.  */
        iterative_hash_template_arg (TREE_TYPE (arg), val);
        FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (arg), i, field, value)
          {
            val = iterative_hash_template_arg (field, val);
            val = iterative_hash_template_arg (value, val);
          }
        return val;
      }

    case PARM_DECL:
      if (!DECL_ARTIFICIAL (arg))
        {
          val = iterative_hash_object (DECL_PARM_INDEX (arg), val);
          val = iterative_hash_object (DECL_PARM_LEVEL (arg), val);
        }
      return iterative_hash_template_arg (TREE_TYPE (arg), val);

    case TARGET_EXPR:
      return iterative_hash_template_arg (TARGET_EXPR_INITIAL (arg), val);

    case PTRMEM_CST:
      val = iterative_hash_template_arg (PTRMEM_CST_CLASS (arg), val);
      return iterative_hash_template_arg (PTRMEM_CST_MEMBER (arg), val);

    case TEMPLATE_PARM_INDEX:
      val = iterative_hash_template_arg
        (TREE_TYPE (TEMPLATE_PARM_DECL (arg)), val);
      val = iterative_hash_object (TEMPLATE_PARM_LEVEL (arg), val);
      return iterative_hash_object (TEMPLATE_PARM_IDX (arg), val);

    case TRAIT_EXPR:
      val = iterative_hash_object (TRAIT_EXPR_KIND (arg), val);
      val = iterative_hash_template_arg (TRAIT_EXPR_TYPE1 (arg), val);
      return iterative_hash_template_arg (TRAIT_EXPR_TYPE2 (arg), val);

    case BASELINK:
      val = iterative_hash_template_arg (BINFO_TYPE (BASELINK_BINFO (arg)),
                                         val);
      return iterative_hash_template_arg (DECL_NAME (get_first_fn (arg)),
                                          val);

    case MODOP_EXPR:
      val = iterative_hash_template_arg (TREE_OPERAND (arg, 0), val);
      code = TREE_CODE (TREE_OPERAND (arg, 1));
      val = iterative_hash_object (code, val);
      return iterative_hash_template_arg (TREE_OPERAND (arg, 2), val);

    case LAMBDA_EXPR:
      /* [temp.over.link] Two lambda-expressions are never considered
         equivalent.

         So just hash the closure type.  */
      return iterative_hash_template_arg (TREE_TYPE (arg), val);

    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case STATIC_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case NEW_EXPR:
      val = iterative_hash_template_arg (TREE_TYPE (arg), val);
      /* Now hash operands as usual.  */
      break;

    case CALL_EXPR:
      {
        tree fn = CALL_EXPR_FN (arg);
        if (tree name = dependent_name (fn))
          {
            if (TREE_CODE (fn) == TEMPLATE_ID_EXPR)
              val = iterative_hash_template_arg (TREE_OPERAND (fn, 1), val);
            fn = name;
          }
        val = iterative_hash_template_arg (fn, val);
        call_expr_arg_iterator ai;
        for (tree x = first_call_expr_arg (arg, &ai); x;
             x = next_call_expr_arg (&ai))
          val = iterative_hash_template_arg (x, val);
        return val;
      }

    default:
      break;
    }

  /* Fall through to generic handling keyed on the tree code class.  */
  char tclass = TREE_CODE_CLASS (code);
  switch (tclass)
    {
    case tcc_type:
      if (tree ats = alias_template_specialization_p (arg, nt_transparent))
        {
          // We want an alias specialization that survived strip_typedefs
          // to hash differently from its TYPE_CANONICAL, to avoid hash
          // collisions that compare as different in template_args_equal.
          // These could be dependent specializations that strip_typedefs
          // left alone, or untouched specializations because
          // coerce_template_parms returns the unconverted template
          // arguments if it sees incomplete argument packs.
          tree ti = TYPE_ALIAS_TEMPLATE_INFO (ats);
          return hash_tmpl_and_args (TI_TEMPLATE (ti), TI_ARGS (ti));
        }

      switch (TREE_CODE (arg))
        {
        case TEMPLATE_TEMPLATE_PARM:
          {
            tree tpi = TEMPLATE_TYPE_PARM_INDEX (arg);

            /* Do not recurse with TPI directly, as that is unbounded
               recursion.  */
            val = iterative_hash_object (TEMPLATE_PARM_LEVEL (tpi), val);
            val = iterative_hash_object (TEMPLATE_PARM_IDX (tpi), val);
          }
          break;

        case DECLTYPE_TYPE:
          val = iterative_hash_template_arg (DECLTYPE_TYPE_EXPR (arg), val);
          break;

        default:
          if (tree canonical = TYPE_CANONICAL (arg))
            val = iterative_hash_object (TYPE_HASH (canonical), val);
          break;
        }

      return val;

    case tcc_declaration:
    case tcc_constant:
      return iterative_hash_expr (arg, val);

    default:
      gcc_assert (IS_EXPR_CODE_CLASS (tclass));
      for (int i = 0, n = cp_tree_operand_length (arg); i < n; ++i)
        val = iterative_hash_template_arg (TREE_OPERAND (arg, i), val);
      return val;
    }

  gcc_unreachable ();
  return 0;
}

/* Unregister the specialization SPEC as a specialization of TMPL.
   Replace it with NEW_SPEC, if NEW_SPEC is non-NULL.  Returns true
   if the SPEC was listed as a specialization of TMPL.

   TINFO is the TEMPLATE_INFO of the old SPEC; the TMPL referred to
   above is most_general_template (TI_TEMPLATE (TINFO)).

   Note that SPEC has been ggc_freed, so we can't look inside it.  */

bool
reregister_specialization (tree spec, tree tinfo, tree new_spec)
{
  spec_entry *entry;
  spec_entry elt;

  elt.tmpl = most_general_template (TI_TEMPLATE (tinfo));
  elt.args = TI_ARGS (tinfo);
  elt.spec = NULL_TREE;

  entry = decl_specializations->find (&elt);
  if (entry != NULL)
    {
      gcc_assert (entry->spec == spec || entry->spec == new_spec);
      gcc_assert (new_spec != NULL_TREE);
      entry->spec = new_spec;
      return 1;
    }

  return 0;
}

/* Like register_specialization, but for local declarations.  We are
   registering SPEC, an instantiation of TMPL.  */

void
register_local_specialization (tree spec, tree tmpl)
{
  gcc_assert (tmpl != spec);
  local_specializations->put (tmpl, spec);
}

/* TYPE is a class type.  Returns true if TYPE is an explicitly
   specialized class.  */

bool
explicit_class_specialization_p (tree type)
{
  if (!CLASSTYPE_TEMPLATE_SPECIALIZATION (type))
    return false;
  return !uses_template_parms (CLASSTYPE_TI_ARGS (type));
}

/* Print the list of functions at FNS, going through all the overloads
   for each element of the list.
Alternatively, FNS cannot be a TREE_LIST, in which case it will be printed together with all the overloads. MORE and *STR should respectively be FALSE and NULL when the function is called from the outside. They are used internally on recursive calls. print_candidates manages the two parameters and leaves NULL in *STR when it ends. */ static void print_candidates_1 (tree fns, char **str, bool more = false) { if (TREE_CODE (fns) == TREE_LIST) for (; fns; fns = TREE_CHAIN (fns)) print_candidates_1 (TREE_VALUE (fns), str, more || TREE_CHAIN (fns)); else for (lkp_iterator iter (fns); iter;) { tree cand = *iter; ++iter; const char *pfx = *str; if (!pfx) { if (more || iter) pfx = _("candidates are:"); else pfx = _("candidate is:"); *str = get_spaces (pfx); } inform (DECL_SOURCE_LOCATION (cand), "%s %#qD", pfx, cand); } } /* Print the list of candidate FNS in an error message. FNS can also be a TREE_LIST of non-functions in the case of an ambiguous lookup. */ void print_candidates (tree fns) { char *str = NULL; print_candidates_1 (fns, &str); free (str); } /* Get a (possibly) constrained template declaration for the purpose of ordering candidates. */ static tree get_template_for_ordering (tree list) { gcc_assert (TREE_CODE (list) == TREE_LIST); tree f = TREE_VALUE (list); if (tree ti = DECL_TEMPLATE_INFO (f)) return TI_TEMPLATE (ti); return f; } /* Among candidates having the same signature, return the most constrained or NULL_TREE if there is no best candidate. If the signatures of candidates vary (e.g., template specialization vs. member function), then there can be no most constrained. Note that we don't compare constraints on the functions themselves, but rather those of their templates. */ static tree most_constrained_function (tree candidates) { // Try to find the best candidate in a first pass. 
  tree champ = candidates;
  for (tree c = TREE_CHAIN (champ); c; c = TREE_CHAIN (c))
    {
      int winner = more_constrained (get_template_for_ordering (champ),
                                     get_template_for_ordering (c));
      if (winner == -1)
        champ = c; // The candidate is more constrained
      else if (winner == 0)
        return NULL_TREE; // Neither is more constrained
    }

  // Verify that the champ is better than previous candidates.
  for (tree c = candidates; c != champ; c = TREE_CHAIN (c))
    {
      if (!more_constrained (get_template_for_ordering (champ),
                             get_template_for_ordering (c)))
        return NULL_TREE;
    }

  return champ;
}

/* Returns the template (one of the functions given by TEMPLATE_ID)
   which can be specialized to match the indicated DECL with the
   explicit template args given in TEMPLATE_ID.  The DECL may be
   NULL_TREE if none is available.  In that case, the functions in
   TEMPLATE_ID are non-members.

   If NEED_MEMBER_TEMPLATE is nonzero the function is known to be a
   specialization of a member template.

   The TEMPLATE_COUNT is the number of references to qualifying
   template classes that appeared in the name of the function.  See
   check_explicit_specialization for a more accurate description.

   TSK indicates what kind of template declaration (if any) is being
   declared.  TSK_TEMPLATE indicates that the declaration given by
   DECL, though a FUNCTION_DECL, has template parameters, and is
   therefore a template function.

   The template args (those explicitly specified and those deduced)
   are output in a newly created vector *TARGS_OUT.

   If it is impossible to determine the result, an error message is
   issued.  The error_mark_node is returned to indicate failure.  */

static tree
determine_specialization (tree template_id,
                          tree decl,
                          tree* targs_out,
                          int need_member_template,
                          int template_count,
                          tmpl_spec_kind tsk)
{
  tree fns;
  tree targs;
  tree explicit_targs;
  tree candidates = NULL_TREE;

  /* A TREE_LIST of templates of which DECL may be a specialization.
     The TREE_VALUE of each node is a TEMPLATE_DECL.  The
     corresponding TREE_PURPOSE is the set of template arguments that,
     when used to instantiate the template, would produce a function
     with the signature of DECL.  */
  tree templates = NULL_TREE;
  int header_count;
  cp_binding_level *b;

  *targs_out = NULL_TREE;

  if (template_id == error_mark_node || decl == error_mark_node)
    return error_mark_node;

  /* We shouldn't be specializing a member template of an
     unspecialized class template; we already gave an error in
     check_specialization_scope, now avoid crashing.  */
  if (!VAR_P (decl)
      && template_count && DECL_CLASS_SCOPE_P (decl)
      && template_class_depth (DECL_CONTEXT (decl)) > 0)
    {
      gcc_assert (errorcount);
      return error_mark_node;
    }

  fns = TREE_OPERAND (template_id, 0);
  explicit_targs = TREE_OPERAND (template_id, 1);

  if (fns == error_mark_node)
    return error_mark_node;

  /* Check for baselinks.  */
  if (BASELINK_P (fns))
    fns = BASELINK_FUNCTIONS (fns);

  if (TREE_CODE (decl) == FUNCTION_DECL && !is_overloaded_fn (fns))
    {
      error_at (DECL_SOURCE_LOCATION (decl),
                "%qD is not a function template", fns);
      return error_mark_node;
    }
  else if (VAR_P (decl) && !variable_template_p (fns))
    {
      error ("%qD is not a variable template", fns);
      return error_mark_node;
    }

  /* Count the number of template headers specified for this
     specialization.  */
  header_count = 0;
  for (b = current_binding_level;
       b->kind == sk_template_parms;
       b = b->level_chain)
    ++header_count;

  tree orig_fns = fns;

  if (variable_template_p (fns))
    {
      /* Variable templates: no overload set to walk, just coerce the
         explicit arguments against the template's parameters.  */
      tree parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (fns));
      targs = coerce_template_parms (parms, explicit_targs, fns,
                                     tf_warning_or_error,
                                     /*req_all*/true, /*use_defarg*/true);
      if (targs != error_mark_node)
        templates = tree_cons (targs, fns, templates);
    }
  else for (lkp_iterator iter (fns); iter; ++iter)
    {
      tree fn = *iter;

      if (TREE_CODE (fn) == TEMPLATE_DECL)
        {
          tree decl_arg_types;
          tree fn_arg_types;
          tree insttype;

          /* In case of explicit specialization, we need to check if
             the number of template headers appearing in the specialization
             is correct.  This is usually done in check_explicit_specialization,
             but the check done there cannot be exhaustive when specializing
             member functions.  Consider the following code:

             template <> void A<int>::f(int);
             template <> template <> void A<int>::f(int);

             Assuming that A<int> is not itself an explicit specialization
             already, the first line specializes "f" which is a non-template
             member function, whilst the second line specializes "f" which
             is a template member function.  So both lines are syntactically
             correct, and check_explicit_specialization does not reject
             them.

             Here, we can do better, as we are matching the specialization
             against the declarations.  We count the number of template
             headers, and we check if they match TEMPLATE_COUNT + 1
             (TEMPLATE_COUNT is the number of qualifying template classes,
             plus there must be another header for the member template
             itself).

             Notice that if header_count is zero, this is not a
             specialization but rather a template instantiation, so there
             is no check we can perform here.  */
          if (header_count && header_count != template_count + 1)
            continue;

          /* Check that the number of template arguments at the
             innermost level for DECL is the same as for FN.  */
          if (current_binding_level->kind == sk_template_parms
              && !current_binding_level->explicit_spec_p
              && (TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (fn))
                  != TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS
                                      (current_template_parms))))
            continue;

          /* DECL might be a specialization of FN.  */
          decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
          fn_arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));

          /* For a non-static member function, we need to make sure
             that the const qualification is the same.  Since
             get_bindings does not try to merge the "this" parameter,
             we must do the comparison explicitly.  */
          if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn))
            {
              if (!same_type_p (TREE_VALUE (fn_arg_types),
                                TREE_VALUE (decl_arg_types)))
                continue;

              /* And the ref-qualification.  */
              if (type_memfn_rqual (TREE_TYPE (decl))
                  != type_memfn_rqual (TREE_TYPE (fn)))
                continue;
            }

          /* Skip the "this" parameter and, for constructors of
             classes with virtual bases, the VTT parameter.  A
             full specialization of a constructor will have a VTT
             parameter, but a template never will.  */
          decl_arg_types
            = skip_artificial_parms_for (decl, decl_arg_types);
          fn_arg_types
            = skip_artificial_parms_for (fn, fn_arg_types);

          /* Function templates cannot be specializations; there are
             no partial specializations of functions.  Therefore, if
             the type of DECL does not match FN, there is no
             match.

             Note that it should never be the case that we have both
             candidates added here, and for regular member functions
             below.  */
          if (tsk == tsk_template)
            {
              if (!comp_template_parms (DECL_TEMPLATE_PARMS (fn),
                                        current_template_parms))
                continue;
              if (!same_type_p (TREE_TYPE (TREE_TYPE (decl)),
                                TREE_TYPE (TREE_TYPE (fn))))
                continue;
              if (!compparms (fn_arg_types, decl_arg_types))
                continue;

              /* If exactly one of the two declarations has trailing
                 requirements, they cannot match.  */
              tree freq = get_trailing_function_requirements (fn);
              tree dreq = get_trailing_function_requirements (decl);
              if (!freq != !dreq)
                continue;
              if (freq)
                {
                  /* Substitute FN's template arguments into its
                     requirements before comparing with DECL's.  */
                  tree fargs = DECL_TI_ARGS (fn);
                  tsubst_flags_t complain = tf_none;
                  freq = tsubst_constraint (freq, fargs, complain, fn);
                  if (!cp_tree_equal (freq, dreq))
                    continue;
                }

              candidates = tree_cons (NULL_TREE, fn, candidates);
              continue;
            }

          /* See whether this function might be a specialization of this
             template.  Suppress access control because we might be trying
             to make this specialization a friend, and we have already done
             access control for the declaration of the specialization.  */
          push_deferring_access_checks (dk_no_check);
          targs = get_bindings (fn, decl, explicit_targs, /*check_ret=*/true);
          pop_deferring_access_checks ();

          if (!targs)
            /* We cannot deduce template arguments that when used to
               specialize TMPL will produce DECL.  */
            continue;

          if (uses_template_parms (targs))
            /* We deduced something involving 'auto', which isn't a valid
               template argument.  */
            continue;

          /* Remove, from the set of candidates, all those functions
             whose constraints are not satisfied.  */
          if (flag_concepts && !constraints_satisfied_p (fn, targs))
            continue;

          // Then, try to form the new function type.
          insttype = tsubst (TREE_TYPE (fn), targs, tf_fndecl_type, NULL_TREE);
          if (insttype == error_mark_node)
            continue;
          fn_arg_types
            = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (insttype));
          if (!compparms (fn_arg_types, decl_arg_types))
            continue;

          /* Save this template, and the arguments deduced.  */
          templates = tree_cons (targs, fn, templates);
        }
      else if (need_member_template)
        /* FN is an ordinary member function, and we need a
           specialization of a member template.  */
        ;
      else if (TREE_CODE (fn) != FUNCTION_DECL)
        /* We can get IDENTIFIER_NODEs here in certain erroneous
           cases.  */
        ;
      else if (!DECL_FUNCTION_MEMBER_P (fn))
        /* This is just an ordinary non-member function.  Nothing can
           be a specialization of that.  */
        ;
      else if (DECL_ARTIFICIAL (fn))
        /* Cannot specialize functions that are created implicitly.  */
        ;
      else
        {
          tree decl_arg_types;

          /* This is an ordinary member function.  However, since
             we're here, we can assume its enclosing class is a
             template class.  For example,

               template <typename T> struct S { void f(); };
               template <> void S<int>::f() {}

             Here, S<int>::f is a non-template, but S<int> is a
             template class.  If FN has the same type as DECL, we
             might be in business.  */

          if (!DECL_TEMPLATE_INFO (fn))
            /* Its enclosing class is an explicit specialization
               of a template class.  This is not a candidate.  */
            continue;

          if (!same_type_p (TREE_TYPE (TREE_TYPE (decl)),
                            TREE_TYPE (TREE_TYPE (fn))))
            /* The return types differ.  */
            continue;

          /* Adjust the type of DECL in case FN is a static member.  */
          decl_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl));
          if (DECL_STATIC_FUNCTION_P (fn)
              && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
            decl_arg_types = TREE_CHAIN (decl_arg_types);

          if (!compparms (TYPE_ARG_TYPES (TREE_TYPE (fn)),
                          decl_arg_types))
            continue;

          if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
              && (type_memfn_rqual (TREE_TYPE (decl))
                  != type_memfn_rqual (TREE_TYPE (fn))))
            continue;

          // If the deduced arguments do not satisfy the constraints,
          // this is not a candidate.
          if (flag_concepts && !constraints_satisfied_p (fn))
            continue;

          // Add the candidate.
          candidates = tree_cons (NULL_TREE, fn, candidates);
        }
    }

  if (templates && TREE_CHAIN (templates))
    {
      /* We have:

           [temp.expl.spec]

           It is possible for a specialization with a given function
           signature to be instantiated from more than one function
           template.  In such cases, explicit specification of the
           template arguments must be used to uniquely identify the
           function template specialization being specialized.

         Note that here, there's no suggestion that we're supposed to
         determine which of the candidate templates is most
         specialized.  However, we also have:

           [temp.func.order]

           Partial ordering of overloaded function template
           declarations is used in the following contexts to select
           the function template to which a function template
           specialization refers:

           -- when an explicit specialization refers to a function
              template.

         So, we do use the partial ordering rules, at least for now.
         This extension can only serve to make invalid programs valid,
         so it's safe.  And, there is strong anecdotal evidence that
         the committee intended the partial ordering rules to apply;
         the EDG front end has that behavior, and John Spicer claims
         that the committee simply forgot to delete the wording in
         [temp.expl.spec].  */
      tree tmpl = most_specialized_instantiation (templates);
      if (tmpl != error_mark_node)
        {
          templates = tmpl;
          TREE_CHAIN (templates) = NULL_TREE;
        }
    }

  // Concepts allows multiple declarations of member functions
  // with the same signature.  Like above, we need to rely on
  // the partial ordering of those candidates to determine which
  // is the best.
  if (flag_concepts && candidates && TREE_CHAIN (candidates))
    {
      if (tree cand = most_constrained_function (candidates))
        {
          candidates = cand;
          TREE_CHAIN (cand) = NULL_TREE;
        }
    }

  if (templates == NULL_TREE && candidates == NULL_TREE)
    {
      error ("template-id %qD for %q+D does not match any template "
             "declaration", template_id, decl);
      if (header_count && header_count != template_count + 1)
        inform (DECL_SOURCE_LOCATION (decl),
                "saw %d %<template<>%>, need %d for "
                "specializing a member function template",
                header_count, template_count + 1);
      else
        print_candidates (orig_fns);
      return error_mark_node;
    }
  else if ((templates && TREE_CHAIN (templates))
           || (candidates && TREE_CHAIN (candidates))
           || (templates && candidates))
    {
      error ("ambiguous template specialization %qD for %q+D",
             template_id, decl);
      candidates = chainon (candidates, templates);
      print_candidates (candidates);
      return error_mark_node;
    }

  /* We have one, and exactly one, match.  */
  if (candidates)
    {
      tree fn = TREE_VALUE (candidates);
      *targs_out = copy_node (DECL_TI_ARGS (fn));

      /* Propagate the candidate's constraints to the declaration.  */
      if (tsk != tsk_template)
        set_constraints (decl, get_constraints (fn));

      /* DECL is a re-declaration or partial instantiation of a template
         function.  */
      if (TREE_CODE (fn) == TEMPLATE_DECL)
        return fn;

      /* It was a specialization of an ordinary member function in a
         template class.  */
      return DECL_TI_TEMPLATE (fn);
    }

  /* It was a specialization of a template.  */
  targs = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (TREE_VALUE (templates)));
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (targs))
    {
      /* Replace only the innermost level with the deduced arguments,
         keeping any enclosing levels.  */
      *targs_out = copy_node (targs);
      SET_TMPL_ARGS_LEVEL (*targs_out,
                           TMPL_ARGS_DEPTH (*targs_out),
                           TREE_PURPOSE (templates));
    }
  else
    *targs_out = TREE_PURPOSE (templates);
  return TREE_VALUE (templates);
}

/* Returns a chain of parameter types, exactly like the SPEC_TYPES,
   but with the default argument values filled in from those in the
   TMPL_TYPES.
 */

static tree
copy_default_args_to_explicit_spec_1 (tree spec_types,
                                      tree tmpl_types)
{
  tree new_spec_types;

  if (!spec_types)
    return NULL_TREE;
  if (spec_types == void_list_node)
    return void_list_node;

  /* Substitute into the rest of the list.  */
  new_spec_types
    = copy_default_args_to_explicit_spec_1 (TREE_CHAIN (spec_types),
                                            TREE_CHAIN (tmpl_types));

  /* Add the default argument for this parameter.  */
  return hash_tree_cons (TREE_PURPOSE (tmpl_types),
                         TREE_VALUE (spec_types),
                         new_spec_types);
}

/* DECL is an explicit specialization.  Replicate default arguments
   from the template it specializes.  (That way, code like:

     template <class T> void f(T = 3);
     template <> void f(double);
     void g () { f (); }

   works, as required.)  An alternative approach would be to look up
   the correct default arguments at the call-site, but this approach
   is consistent with how implicit instantiations are handled.  */

static void
copy_default_args_to_explicit_spec (tree decl)
{
  tree tmpl;
  tree spec_types;
  tree tmpl_types;
  tree new_spec_types;
  tree old_type;
  tree new_type;
  tree t;
  tree object_type = NULL_TREE;
  tree in_charge = NULL_TREE;
  tree vtt = NULL_TREE;

  /* See if there's anything we need to do: bail out early when the
     template has no default arguments at all.  */
  tmpl = DECL_TI_TEMPLATE (decl);
  tmpl_types = TYPE_ARG_TYPES (TREE_TYPE (DECL_TEMPLATE_RESULT (tmpl)));
  for (t = tmpl_types; t; t = TREE_CHAIN (t))
    if (TREE_PURPOSE (t))
      break;
  if (!t)
    return;

  old_type = TREE_TYPE (decl);
  spec_types = TYPE_ARG_TYPES (old_type);

  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl))
    {
      /* Remove the this pointer, but remember the object's type for
         CV quals.  */
      object_type = TREE_TYPE (TREE_VALUE (spec_types));
      spec_types = TREE_CHAIN (spec_types);
      tmpl_types = TREE_CHAIN (tmpl_types);

      if (DECL_HAS_IN_CHARGE_PARM_P (decl))
        {
          /* DECL may contain more parameters than TMPL due to the extra
             in-charge parameter in constructors and destructors.  */
          in_charge = spec_types;
          spec_types = TREE_CHAIN (spec_types);
        }
      if (DECL_HAS_VTT_PARM_P (decl))
        {
          vtt = spec_types;
          spec_types = TREE_CHAIN (spec_types);
        }
    }

  /* Compute the merged default arguments.  */
  new_spec_types =
    copy_default_args_to_explicit_spec_1 (spec_types, tmpl_types);

  /* Compute the new FUNCTION_TYPE.  */
  if (object_type)
    {
      /* Re-attach the artificial parameters that were stripped above,
         innermost first.  */
      if (vtt)
        new_spec_types = hash_tree_cons (TREE_PURPOSE (vtt),
                                         TREE_VALUE (vtt),
                                         new_spec_types);

      if (in_charge)
        /* Put the in-charge parameter back.  */
        new_spec_types = hash_tree_cons (TREE_PURPOSE (in_charge),
                                         TREE_VALUE (in_charge),
                                         new_spec_types);

      new_type = build_method_type_directly (object_type,
                                             TREE_TYPE (old_type),
                                             new_spec_types);
    }
  else
    new_type = build_function_type (TREE_TYPE (old_type),
                                    new_spec_types);
  new_type = cp_build_type_attribute_variant (new_type,
                                              TYPE_ATTRIBUTES (old_type));
  new_type = cxx_copy_lang_qualifiers (new_type, old_type);

  TREE_TYPE (decl) = new_type;
}

/* Return the number of template headers we expect to see for a definition
   or specialization of CTYPE or one of its non-template members.  */

int
num_template_headers_for_class (tree ctype)
{
  int num_templates = 0;

  while (ctype && CLASS_TYPE_P (ctype))
    {
      /* You're supposed to have one `template <...>' for every
         template class, but you don't need one for a full
         specialization.  For example:

           template <class T> struct S{};
           template <> struct S<int> { void f(); };
           void S<int>::f () {}

         is correct; there shouldn't be a `template <>' for
         the definition of `S<int>::f'.  */
      if (!CLASSTYPE_TEMPLATE_INFO (ctype))
        /* If CTYPE does not have template information of any
           kind,  then it is not a template, nor is it nested
           within a template.  */
        break;
      if (explicit_class_specialization_p (ctype))
        break;
      if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (ctype)))
        ++num_templates;

      ctype = TYPE_CONTEXT (ctype);
    }

  return num_templates;
}

/* Do a simple sanity check on the template headers that precede the
   variable declaration DECL.  */

void
check_template_variable (tree decl)
{
  tree ctx = CP_DECL_CONTEXT (decl);
  int wanted = num_template_headers_for_class (ctx);
  if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl)
      && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)))
    {
      if (cxx_dialect < cxx14)
        pedwarn (DECL_SOURCE_LOCATION (decl), 0,
                 "variable templates only available with "
                 "%<-std=c++14%> or %<-std=gnu++14%>");

      // Namespace-scope variable templates should have a template header.
      ++wanted;
    }
  if (template_header_count > wanted)
    {
      auto_diagnostic_group d;
      bool warned = pedwarn (DECL_SOURCE_LOCATION (decl), 0,
                             "too many template headers for %qD "
                             "(should be %d)",
                             decl, wanted);
      if (warned && CLASS_TYPE_P (ctx)
          && CLASSTYPE_TEMPLATE_SPECIALIZATION (ctx))
        inform (DECL_SOURCE_LOCATION (decl),
                "members of an explicitly specialized class are defined "
                "without a template header");
    }
}

/* An explicit specialization whose declarator-id or class-head-name is not
   qualified shall be declared in the nearest enclosing namespace of the
   template, or, if the namespace is inline (7.3.1), any namespace from its
   enclosing namespace set.

   If the name declared in the explicit instantiation is an unqualified name,
   the explicit instantiation shall appear in the namespace where its template
   is declared or, if that namespace is inline (7.3.1), any namespace from
   its enclosing namespace set.  */

void
check_unqualified_spec_or_inst (tree t, location_t loc)
{
  tree tmpl = most_general_template (t);
  if (DECL_NAMESPACE_SCOPE_P (tmpl)
      && !is_nested_namespace (current_namespace,
                               CP_DECL_CONTEXT (tmpl), true))
    {
      if (processing_specialization)
        permerror (loc, "explicit specialization of %qD outside its "
                   "namespace must use a nested-name-specifier", tmpl);
      else if (processing_explicit_instantiation
               && cxx_dialect >= cxx11)
        /* This was allowed in C++98, so only pedwarn.  */
        pedwarn (loc, OPT_Wpedantic, "explicit instantiation of %qD "
                 "outside its namespace must use a nested-name-"
                 "specifier", tmpl);
    }
}

/* Warn for a template specialization SPEC that is missing some of a set
   of function or type attributes that the template TEMPL is declared with.
   ATTRLIST is a list of additional attributes that SPEC should be taken
   to ultimately be declared with.  */

static void
warn_spec_missing_attributes (tree tmpl, tree spec, tree attrlist)
{
  if (DECL_FUNCTION_TEMPLATE_P (tmpl))
    tmpl = DECL_TEMPLATE_RESULT (tmpl);

  /* Avoid warning if the difference between the primary and
     the specialization is not in one of the attributes below.
     The array is NULL-terminated for decls_mismatched_attributes.  */
  const char* const blacklist[] =
    {"alloc_align", "alloc_size", "assume_aligned", "format",
     "format_arg", "malloc", "nonnull", NULL};

  /* Put together a list of the black listed attributes that the primary
     template is declared with that the specialization is not, in case
     it's not apparent from the most recent declaration of the primary.  */
  pretty_printer str;
  unsigned nattrs = decls_mismatched_attributes (tmpl, spec, attrlist,
                                                 blacklist, &str);

  if (!nattrs)
    return;

  auto_diagnostic_group d;
  if (warning_at (DECL_SOURCE_LOCATION (spec), OPT_Wmissing_attributes,
                  "explicit specialization %q#D may be missing attributes",
                  spec))
    inform (DECL_SOURCE_LOCATION (tmpl),
            nattrs > 1
            ? G_("missing primary template attributes %s")
            : G_("missing primary template attribute %s"),
            pp_formatted_text (&str));
}

/* Check to see if the function just declared, as indicated in
   DECLARATOR, and in DECL, is a specialization of a function
   template.  We may also discover that the declaration is an explicit
   instantiation at this point.

   Returns DECL, or an equivalent declaration that should be used
   instead if all goes well.  Issues an error message if something is
   amiss.  Returns error_mark_node if the error is not easily
   recoverable.

   FLAGS is a bitmask consisting of the following flags:

   2: The function has a definition.
   4: The function is a friend.
The TEMPLATE_COUNT is the number of references to qualifying template classes that appeared in the name of the function. For example, in template <class T> struct S { void f(); }; void S<int>::f(); the TEMPLATE_COUNT would be 1. However, explicitly specialized classes are not counted in the TEMPLATE_COUNT, so that in template <class T> struct S {}; template <> struct S<int> { void f(); } template <> void S<int>::f(); the TEMPLATE_COUNT would be 0. (Note that this declaration is invalid; there should be no template <>.) If the function is a specialization, it is marked as such via DECL_TEMPLATE_SPECIALIZATION. Furthermore, its DECL_TEMPLATE_INFO is set up correctly, and it is added to the list of specializations for that template. */ tree check_explicit_specialization (tree declarator, tree decl, int template_count, int flags, tree attrlist) { int have_def = flags & 2; int is_friend = flags & 4; bool is_concept = flags & 8; int specialization = 0; int explicit_instantiation = 0; int member_specialization = 0; tree ctype = DECL_CLASS_CONTEXT (decl); tree dname = DECL_NAME (decl); tmpl_spec_kind tsk; if (is_friend) { if (!processing_specialization) tsk = tsk_none; else tsk = tsk_excessive_parms; } else tsk = current_tmpl_spec_kind (template_count); switch (tsk) { case tsk_none: if (processing_specialization && !VAR_P (decl)) { specialization = 1; SET_DECL_TEMPLATE_SPECIALIZATION (decl); } else if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR) { if (is_friend) /* This could be something like: template <class T> void f(T); class S { friend void f<>(int); } */ specialization = 1; else { /* This case handles bogus declarations like template <> template <class T> void f<int>(); */ error_at (cp_expr_loc_or_input_loc (declarator), "template-id %qE in declaration of primary template", declarator); return decl; } } break; case tsk_invalid_member_spec: /* The error has already been reported in check_specialization_scope. 
*/ return error_mark_node; case tsk_invalid_expl_inst: error ("template parameter list used in explicit instantiation"); /* Fall through. */ case tsk_expl_inst: if (have_def) error ("definition provided for explicit instantiation"); explicit_instantiation = 1; break; case tsk_excessive_parms: case tsk_insufficient_parms: if (tsk == tsk_excessive_parms) error ("too many template parameter lists in declaration of %qD", decl); else if (template_header_count) error("too few template parameter lists in declaration of %qD", decl); else error("explicit specialization of %qD must be introduced by " "%<template <>%>", decl); /* Fall through. */ case tsk_expl_spec: if (is_concept) error ("explicit specialization declared %<concept%>"); if (VAR_P (decl) && TREE_CODE (declarator) != TEMPLATE_ID_EXPR) /* In cases like template<> constexpr bool v = true; We'll give an error in check_template_variable. */ break; SET_DECL_TEMPLATE_SPECIALIZATION (decl); if (ctype) member_specialization = 1; else specialization = 1; break; case tsk_template: if (TREE_CODE (declarator) == TEMPLATE_ID_EXPR) { /* This case handles bogus declarations like template <> template <class T> void f<int>(); */ if (!uses_template_parms (TREE_OPERAND (declarator, 1))) error_at (cp_expr_loc_or_input_loc (declarator), "template-id %qE in declaration of primary template", declarator); else if (variable_template_p (TREE_OPERAND (declarator, 0))) { /* Partial specialization of variable template. 
*/ SET_DECL_TEMPLATE_SPECIALIZATION (decl); specialization = 1; goto ok; } else if (cxx_dialect < cxx14) error_at (cp_expr_loc_or_input_loc (declarator), "non-type partial specialization %qE " "is not allowed", declarator); else error_at (cp_expr_loc_or_input_loc (declarator), "non-class, non-variable partial specialization %qE " "is not allowed", declarator); return decl; ok:; } if (ctype && CLASSTYPE_TEMPLATE_INSTANTIATION (ctype)) /* This is a specialization of a member template, without specialization the containing class. Something like: template <class T> struct S { template <class U> void f (U); }; template <> template <class U> void S<int>::f(U) {} That's a specialization -- but of the entire template. */ specialization = 1; break; default: gcc_unreachable (); } if ((specialization || member_specialization) /* This doesn't apply to variable templates. */ && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (decl))) { tree t = TYPE_ARG_TYPES (TREE_TYPE (decl)); for (; t; t = TREE_CHAIN (t)) if (TREE_PURPOSE (t)) { permerror (input_location, "default argument specified in explicit specialization"); break; } } if (specialization || member_specialization || explicit_instantiation) { tree tmpl = NULL_TREE; tree targs = NULL_TREE; bool was_template_id = (TREE_CODE (declarator) == TEMPLATE_ID_EXPR); bool found_hidden = false; /* Make sure that the declarator is a TEMPLATE_ID_EXPR. */ if (!was_template_id) { tree fns; gcc_assert (identifier_p (declarator)); if (ctype) fns = dname; else { /* If there is no class context, the explicit instantiation must be at namespace scope. */ gcc_assert (DECL_NAMESPACE_SCOPE_P (decl)); /* Find the namespace binding, using the declaration context. */ fns = lookup_qualified_name (CP_DECL_CONTEXT (decl), dname, LOOK_want::NORMAL, true); if (fns == error_mark_node) { /* If lookup fails, look for a friend declaration so we can give a better diagnostic. 
*/ fns = (lookup_qualified_name (CP_DECL_CONTEXT (decl), dname, LOOK_want::NORMAL | LOOK_want::HIDDEN_FRIEND, /*complain*/true)); found_hidden = true; } if (fns == error_mark_node || !is_overloaded_fn (fns)) { error ("%qD is not a template function", dname); fns = error_mark_node; } } declarator = lookup_template_function (fns, NULL_TREE); } if (declarator == error_mark_node) return error_mark_node; if (ctype != NULL_TREE && TYPE_BEING_DEFINED (ctype)) { if (!explicit_instantiation) /* A specialization in class scope. This is invalid, but the error will already have been flagged by check_specialization_scope. */ return error_mark_node; else { /* It's not valid to write an explicit instantiation in class scope, e.g.: class C { template void f(); } This case is caught by the parser. However, on something like: template class C { void f(); }; (which is invalid) we can get here. The error will be issued later. */ ; } return decl; } else if (ctype != NULL_TREE && (identifier_p (TREE_OPERAND (declarator, 0)))) { // We'll match variable templates in start_decl. if (VAR_P (decl)) return decl; /* Find the list of functions in ctype that have the same name as the declared function. */ tree name = TREE_OPERAND (declarator, 0); if (constructor_name_p (name, ctype)) { if (DECL_CONSTRUCTOR_P (decl) ? !TYPE_HAS_USER_CONSTRUCTOR (ctype) : !CLASSTYPE_DESTRUCTOR (ctype)) { /* From [temp.expl.spec]: If such an explicit specialization for the member of a class template names an implicitly-declared special member function (clause _special_), the program is ill-formed. Similar language is found in [temp.explicit]. */ error ("specialization of implicitly-declared special member function"); return error_mark_node; } name = DECL_NAME (decl); } /* For a type-conversion operator, We might be looking for `operator int' which will be a specialization of `operator T'. Grab all the conversion operators, and then select from them. 
*/ tree fns = get_class_binding (ctype, IDENTIFIER_CONV_OP_P (name) ? conv_op_identifier : name); if (fns == NULL_TREE) { error ("no member function %qD declared in %qT", name, ctype); return error_mark_node; } else TREE_OPERAND (declarator, 0) = fns; } /* Figure out what exactly is being specialized at this point. Note that for an explicit instantiation, even one for a member function, we cannot tell a priori whether the instantiation is for a member template, or just a member function of a template class. Even if a member template is being instantiated, the member template arguments may be elided if they can be deduced from the rest of the declaration. */ tmpl = determine_specialization (declarator, decl, &targs, member_specialization, template_count, tsk); if (!tmpl || tmpl == error_mark_node) /* We couldn't figure out what this declaration was specializing. */ return error_mark_node; else { if (found_hidden && TREE_CODE (decl) == FUNCTION_DECL) { auto_diagnostic_group d; if (pedwarn (DECL_SOURCE_LOCATION (decl), 0, "friend declaration %qD is not visible to " "explicit specialization", tmpl)) inform (DECL_SOURCE_LOCATION (tmpl), "friend declaration here"); } if (!ctype && !is_friend && CP_DECL_CONTEXT (decl) == current_namespace) check_unqualified_spec_or_inst (tmpl, DECL_SOURCE_LOCATION (decl)); tree gen_tmpl = most_general_template (tmpl); if (explicit_instantiation) { /* We don't set DECL_EXPLICIT_INSTANTIATION here; that is done by do_decl_instantiation later. */ int arg_depth = TMPL_ARGS_DEPTH (targs); int parm_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl)); if (arg_depth > parm_depth) { /* If TMPL is not the most general template (for example, if TMPL is a friend template that is injected into namespace scope), then there will be too many levels of TARGS. Remove some of them here. 
*/ int i; tree new_targs; new_targs = make_tree_vec (parm_depth); for (i = arg_depth - parm_depth; i < arg_depth; ++i) TREE_VEC_ELT (new_targs, i - (arg_depth - parm_depth)) = TREE_VEC_ELT (targs, i); targs = new_targs; } return instantiate_template (tmpl, targs, tf_error); } /* If we thought that the DECL was a member function, but it turns out to be specializing a static member function, make DECL a static member function as well. */ if (DECL_FUNCTION_TEMPLATE_P (tmpl) && DECL_STATIC_FUNCTION_P (tmpl) && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)) revert_static_member_fn (decl); /* If this is a specialization of a member template of a template class, we want to return the TEMPLATE_DECL, not the specialization of it. */ if (tsk == tsk_template && !was_template_id) { tree result = DECL_TEMPLATE_RESULT (tmpl); SET_DECL_TEMPLATE_SPECIALIZATION (tmpl); DECL_INITIAL (result) = NULL_TREE; if (have_def) { tree parm; DECL_SOURCE_LOCATION (tmpl) = DECL_SOURCE_LOCATION (decl); DECL_SOURCE_LOCATION (result) = DECL_SOURCE_LOCATION (decl); /* We want to use the argument list specified in the definition, not in the original declaration. */ DECL_ARGUMENTS (result) = DECL_ARGUMENTS (decl); for (parm = DECL_ARGUMENTS (result); parm; parm = DECL_CHAIN (parm)) DECL_CONTEXT (parm) = result; } return register_specialization (tmpl, gen_tmpl, targs, is_friend, 0); } /* Set up the DECL_TEMPLATE_INFO for DECL. */ DECL_TEMPLATE_INFO (decl) = build_template_info (tmpl, targs); if (was_template_id) TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl)) = true; /* Inherit default function arguments from the template DECL is specializing. */ if (DECL_FUNCTION_TEMPLATE_P (tmpl)) copy_default_args_to_explicit_spec (decl); /* This specialization has the same protection as the template it specializes. */ TREE_PRIVATE (decl) = TREE_PRIVATE (gen_tmpl); TREE_PROTECTED (decl) = TREE_PROTECTED (gen_tmpl); /* 7.1.1-1 [dcl.stc] A storage-class-specifier shall not be specified in an explicit specialization... 
The parser rejects these, so unless action is taken here, explicit function specializations will always appear with global linkage. The action recommended by the C++ CWG in response to C++ defect report 605 is to make the storage class and linkage of the explicit specialization match the templated function: http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#605 */ if (tsk == tsk_expl_spec && DECL_FUNCTION_TEMPLATE_P (gen_tmpl)) { tree tmpl_func = DECL_TEMPLATE_RESULT (gen_tmpl); gcc_assert (TREE_CODE (tmpl_func) == FUNCTION_DECL); /* A concept cannot be specialized. */ if (DECL_DECLARED_CONCEPT_P (tmpl_func)) { error ("explicit specialization of function concept %qD", gen_tmpl); return error_mark_node; } /* This specialization has the same linkage and visibility as the function template it specializes. */ TREE_PUBLIC (decl) = TREE_PUBLIC (tmpl_func); if (! TREE_PUBLIC (decl)) { DECL_INTERFACE_KNOWN (decl) = 1; DECL_NOT_REALLY_EXTERN (decl) = 1; } DECL_THIS_STATIC (decl) = DECL_THIS_STATIC (tmpl_func); if (DECL_VISIBILITY_SPECIFIED (tmpl_func)) { DECL_VISIBILITY_SPECIFIED (decl) = 1; DECL_VISIBILITY (decl) = DECL_VISIBILITY (tmpl_func); } } /* If DECL is a friend declaration, declared using an unqualified name, the namespace associated with DECL may have been set incorrectly. For example, in: template <typename T> void f(T); namespace N { struct S { friend void f<int>(int); } } we will have set the DECL_CONTEXT for the friend declaration to N, rather than to the global namespace. */ if (DECL_NAMESPACE_SCOPE_P (decl)) DECL_CONTEXT (decl) = DECL_CONTEXT (tmpl); if (is_friend && !have_def) /* This is not really a declaration of a specialization. It's just the name of an instantiation. But, it's not a request for an instantiation, either. */ SET_DECL_IMPLICIT_INSTANTIATION (decl); else if (TREE_CODE (decl) == FUNCTION_DECL) /* A specialization is not necessarily COMDAT. 
*/ DECL_COMDAT (decl) = (TREE_PUBLIC (decl) && DECL_DECLARED_INLINE_P (decl)); else if (VAR_P (decl)) DECL_COMDAT (decl) = false; /* If this is a full specialization, register it so that we can find it again. Partial specializations will be registered in process_partial_specialization. */ if (!processing_template_decl) { warn_spec_missing_attributes (gen_tmpl, decl, attrlist); decl = register_specialization (decl, gen_tmpl, targs, is_friend, 0); } /* A 'structor should already have clones. */ gcc_assert (decl == error_mark_node || variable_template_p (tmpl) || !(DECL_CONSTRUCTOR_P (decl) || DECL_DESTRUCTOR_P (decl)) || DECL_CLONED_FUNCTION_P (DECL_CHAIN (decl))); } } return decl; } /* Returns 1 iff PARMS1 and PARMS2 are identical sets of template parameters. These are represented in the same format used for DECL_TEMPLATE_PARMS. */ int comp_template_parms (const_tree parms1, const_tree parms2) { const_tree p1; const_tree p2; if (parms1 == parms2) return 1; for (p1 = parms1, p2 = parms2; p1 != NULL_TREE && p2 != NULL_TREE; p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2)) { tree t1 = TREE_VALUE (p1); tree t2 = TREE_VALUE (p2); int i; gcc_assert (TREE_CODE (t1) == TREE_VEC); gcc_assert (TREE_CODE (t2) == TREE_VEC); if (TREE_VEC_LENGTH (t1) != TREE_VEC_LENGTH (t2)) return 0; for (i = 0; i < TREE_VEC_LENGTH (t2); ++i) { tree parm1 = TREE_VALUE (TREE_VEC_ELT (t1, i)); tree parm2 = TREE_VALUE (TREE_VEC_ELT (t2, i)); /* If either of the template parameters are invalid, assume they match for the sake of error recovery. */ if (error_operand_p (parm1) || error_operand_p (parm2)) return 1; if (TREE_CODE (parm1) != TREE_CODE (parm2)) return 0; if (TREE_CODE (parm1) == TEMPLATE_TYPE_PARM && (TEMPLATE_TYPE_PARAMETER_PACK (parm1) == TEMPLATE_TYPE_PARAMETER_PACK (parm2))) continue; else if (!same_type_p (TREE_TYPE (parm1), TREE_TYPE (parm2))) return 0; } } if ((p1 != NULL_TREE) != (p2 != NULL_TREE)) /* One set of parameters has more parameters lists than the other. 
*/ return 0; return 1; } /* Returns true if two template parameters are declared with equivalent constraints. */ static bool template_parameter_constraints_equivalent_p (const_tree parm1, const_tree parm2) { tree req1 = TREE_TYPE (parm1); tree req2 = TREE_TYPE (parm2); if (!req1 != !req2) return false; if (req1) return cp_tree_equal (req1, req2); return true; } /* Returns true when two template parameters are equivalent. */ static bool template_parameters_equivalent_p (const_tree parm1, const_tree parm2) { tree decl1 = TREE_VALUE (parm1); tree decl2 = TREE_VALUE (parm2); /* If either of the template parameters are invalid, assume they match for the sake of error recovery. */ if (error_operand_p (decl1) || error_operand_p (decl2)) return true; /* ... they declare parameters of the same kind. */ if (TREE_CODE (decl1) != TREE_CODE (decl2)) return false; /* ... one parameter was introduced by a parameter declaration, then both are. This case arises as a result of eagerly rewriting declarations during parsing. */ if (DECL_VIRTUAL_P (decl1) != DECL_VIRTUAL_P (decl2)) return false; /* ... if either declares a pack, they both do. */ if (template_parameter_pack_p (decl1) != template_parameter_pack_p (decl2)) return false; if (TREE_CODE (decl1) == PARM_DECL) { /* ... if they declare non-type parameters, the types are equivalent. */ if (!same_type_p (TREE_TYPE (decl1), TREE_TYPE (decl2))) return false; } else if (TREE_CODE (decl2) == TEMPLATE_DECL) { /* ... if they declare template template parameters, their template parameter lists are equivalent. */ if (!template_heads_equivalent_p (decl1, decl2)) return false; } /* ... if they are declared with a qualified-concept name, they both are, and those names are equivalent. */ return template_parameter_constraints_equivalent_p (parm1, parm2); } /* Returns true if two template parameters lists are equivalent. Two template parameter lists are equivalent if they have the same length and their corresponding parameters are equivalent. 
PARMS1 and PARMS2 are TREE_LISTs containing TREE_VECs: the data structure returned by DECL_TEMPLATE_PARMS. This is generally the same implementation as comp_template_parms except that it also the concept names and arguments used to introduce parameters. */ static bool template_parameter_lists_equivalent_p (const_tree parms1, const_tree parms2) { if (parms1 == parms2) return true; const_tree p1 = parms1; const_tree p2 = parms2; while (p1 != NULL_TREE && p2 != NULL_TREE) { tree list1 = TREE_VALUE (p1); tree list2 = TREE_VALUE (p2); if (TREE_VEC_LENGTH (list1) != TREE_VEC_LENGTH (list2)) return 0; for (int i = 0; i < TREE_VEC_LENGTH (list2); ++i) { tree parm1 = TREE_VEC_ELT (list1, i); tree parm2 = TREE_VEC_ELT (list2, i); if (!template_parameters_equivalent_p (parm1, parm2)) return false; } p1 = TREE_CHAIN (p1); p2 = TREE_CHAIN (p2); } if ((p1 != NULL_TREE) != (p2 != NULL_TREE)) return false; return true; } /* Return true if the requires-clause of the template parameter lists are equivalent and false otherwise. */ static bool template_requirements_equivalent_p (const_tree parms1, const_tree parms2) { tree req1 = TEMPLATE_PARMS_CONSTRAINTS (parms1); tree req2 = TEMPLATE_PARMS_CONSTRAINTS (parms2); if ((req1 != NULL_TREE) != (req2 != NULL_TREE)) return false; if (!cp_tree_equal (req1, req2)) return false; return true; } /* Returns true if two template heads are equivalent. 17.6.6.1p6: Two template heads are equivalent if their template parameter lists are equivalent and their requires clauses are equivalent. In pre-C++20, this is equivalent to calling comp_template_parms for the template parameters of TMPL1 and TMPL2. */ bool template_heads_equivalent_p (const_tree tmpl1, const_tree tmpl2) { tree parms1 = DECL_TEMPLATE_PARMS (tmpl1); tree parms2 = DECL_TEMPLATE_PARMS (tmpl2); /* Don't change the matching rules for pre-C++20. */ if (cxx_dialect < cxx20) return comp_template_parms (parms1, parms2); /* ... 
have the same number of template parameters, and their corresponding parameters are equivalent. */ if (!template_parameter_lists_equivalent_p (parms1, parms2)) return false; /* ... if either has a requires-clause, they both do and their corresponding constraint-expressions are equivalent. */ return template_requirements_equivalent_p (parms1, parms2); } /* Determine whether PARM is a parameter pack. */ bool template_parameter_pack_p (const_tree parm) { /* Determine if we have a non-type template parameter pack. */ if (TREE_CODE (parm) == PARM_DECL) return (DECL_TEMPLATE_PARM_P (parm) && TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))); if (TREE_CODE (parm) == TEMPLATE_PARM_INDEX) return TEMPLATE_PARM_PARAMETER_PACK (parm); /* If this is a list of template parameters, we could get a TYPE_DECL or a TEMPLATE_DECL. */ if (TREE_CODE (parm) == TYPE_DECL || TREE_CODE (parm) == TEMPLATE_DECL) parm = TREE_TYPE (parm); /* Otherwise it must be a type template parameter. */ return ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM) && TEMPLATE_TYPE_PARAMETER_PACK (parm)); } /* Determine if T is a function parameter pack. */ bool function_parameter_pack_p (const_tree t) { if (t && TREE_CODE (t) == PARM_DECL) return DECL_PACK_P (t); return false; } /* Return the function template declaration of PRIMARY_FUNC_TMPL_INST. PRIMARY_FUNC_TMPL_INST is a primary function template instantiation. */ tree get_function_template_decl (const_tree primary_func_tmpl_inst) { if (! primary_func_tmpl_inst || TREE_CODE (primary_func_tmpl_inst) != FUNCTION_DECL || ! primary_template_specialization_p (primary_func_tmpl_inst)) return NULL; return DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (primary_func_tmpl_inst)); } /* Return true iff the function parameter PARAM_DECL was expanded from the function parameter pack PACK. 
*/

bool
function_parameter_expanded_from_pack_p (tree param_decl, tree pack)
{
  if (DECL_ARTIFICIAL (param_decl)
      || !function_parameter_pack_p (pack))
    return false;

  /* The parameter pack and its pack arguments have the same
     DECL_PARM_INDEX.  */
  return DECL_PARM_INDEX (pack) == DECL_PARM_INDEX (param_decl);
}

/* Determine whether ARGS describes a variadic template args list,
   i.e., one that is terminated by a template argument pack.  */

static bool
template_args_variadic_p (tree args)
{
  int nargs;
  tree last_parm;

  if (args == NULL_TREE)
    return false;

  /* Only the innermost level of arguments can end in a pack.  */
  args = INNERMOST_TEMPLATE_ARGS (args);
  nargs = TREE_VEC_LENGTH (args);

  if (nargs == 0)
    return false;

  last_parm = TREE_VEC_ELT (args, nargs - 1);

  return ARGUMENT_PACK_P (last_parm);
}

/* Generate a new name for the parameter pack name NAME (an
   IDENTIFIER_NODE) that incorporates its index I, producing
   "NAME#I".  Returns NULL_TREE when NAME is NULL_TREE.  */

static tree
make_ith_pack_parameter_name (tree name, int i)
{
  /* Munge the name to include the parameter index.  */
#define NUMBUF_LEN 128
  char numbuf[NUMBUF_LEN];
  char* newname;
  int newname_len;

  if (name == NULL_TREE)
    return name;
  snprintf (numbuf, NUMBUF_LEN, "%i", i);
  /* +2: one byte for the '#' separator, one for the terminating NUL.  */
  newname_len = IDENTIFIER_LENGTH (name)
	        + strlen (numbuf) + 2;
  newname = (char*)alloca (newname_len);
  snprintf (newname, newname_len, "%s#%i", IDENTIFIER_POINTER (name), i);
  return get_identifier (newname);
}

/* Return true if T is a primary function, class or alias template
   specialization, not including the template pattern.
*/

bool
primary_template_specialization_p (const_tree t)
{
  if (!t)
    return false;

  if (TREE_CODE (t) == FUNCTION_DECL || VAR_P (t))
    return (DECL_LANG_SPECIFIC (t)
	    && DECL_USE_TEMPLATE (t)
	    && DECL_TEMPLATE_INFO (t)
	    && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (t)));
  else if (CLASS_TYPE_P (t) && !TYPE_DECL_ALIAS_P (TYPE_NAME (t)))
    return (CLASSTYPE_TEMPLATE_INFO (t)
	    && CLASSTYPE_USE_TEMPLATE (t)
	    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (t)));
  else if (alias_template_specialization_p (t, nt_transparent))
    return true;

  return false;
}

/* Return true if PARM is a template template parameter.  */

bool
template_template_parameter_p (const_tree parm)
{
  return DECL_TEMPLATE_TEMPLATE_PARM_P (parm);
}

/* Return true iff PARM is a DECL representing a type template
   parameter.  */

bool
template_type_parameter_p (const_tree parm)
{
  return (parm
	  && (TREE_CODE (parm) == TYPE_DECL
	      || TREE_CODE (parm) == TEMPLATE_DECL)
	  && DECL_TEMPLATE_PARM_P (parm));
}

/* Return the template parameters of T if T is a
   primary template instantiation, NULL otherwise.  */

tree
get_primary_template_innermost_parameters (const_tree t)
{
  tree parms = NULL, template_info = NULL;

  if ((template_info = get_template_info (t))
      && primary_template_specialization_p (t))
    parms = INNERMOST_TEMPLATE_PARMS
	(DECL_TEMPLATE_PARMS (TI_TEMPLATE (template_info)));

  return parms;
}

/* Return the template parameters of the LEVELth level from the full list
   of template parameters PARMS.  */

tree
get_template_parms_at_level (tree parms, int level)
{
  tree p;
  /* Reject malformed input or a LEVEL deeper than the list provides.  */
  if (!parms
      || TREE_CODE (parms) != TREE_LIST
      || level > TMPL_PARMS_DEPTH (parms))
    return NULL_TREE;

  for (p = parms; p; p = TREE_CHAIN (p))
    if (TMPL_PARMS_DEPTH (p) == level)
      return p;

  return NULL_TREE;
}

/* Returns the template arguments of T if T is a template instantiation,
   NULL otherwise.
*/

tree
get_template_innermost_arguments (const_tree t)
{
  tree args = NULL, template_info = NULL;

  if ((template_info = get_template_info (t))
      && TI_ARGS (template_info))
    args = INNERMOST_TEMPLATE_ARGS (TI_ARGS (template_info));

  return args;
}

/* Return the argument pack elements of T if T is a template argument pack,
   NULL otherwise.  */

tree
get_template_argument_pack_elems (const_tree t)
{
  if (TREE_CODE (t) != TYPE_ARGUMENT_PACK
      && TREE_CODE (t) != NONTYPE_ARGUMENT_PACK)
    return NULL;

  return ARGUMENT_PACK_ARGS (t);
}

/* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the
   ARGUMENT_PACK_SELECT represents.  */

static tree
argument_pack_select_arg (tree t)
{
  tree args = ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (t));
  tree arg = TREE_VEC_ELT (args, ARGUMENT_PACK_SELECT_INDEX (t));

  /* If the selected argument is an expansion E, that most likely means we were
     called from gen_elem_of_pack_expansion_instantiation during the
     substituting of an argument pack (of which the Ith element is a pack
     expansion, where I is ARGUMENT_PACK_SELECT_INDEX) into a pack expansion.
     In this case, the Ith element resulting from this substituting is going to
     be a pack expansion, which pattern is the pattern of E.  Let's return the
     pattern of E, and gen_elem_of_pack_expansion_instantiation will build the
     resulting pack expansion from it.  */
  if (PACK_EXPANSION_P (arg))
    {
      /* Make sure we aren't throwing away arg info.  */
      gcc_assert (!PACK_EXPANSION_EXTRA_ARGS (arg));
      arg = PACK_EXPANSION_PATTERN (arg);
    }

  return arg;
}

/* True iff FN is a function representing a built-in variadic parameter
   pack.  */

bool
builtin_pack_fn_p (tree fn)
{
  if (!fn
      || TREE_CODE (fn) != FUNCTION_DECL
      || !DECL_IS_BUILTIN (fn))
    return false;

  /* __integer_pack is currently the only such built-in.  */
  if (id_equal (DECL_NAME (fn), "__integer_pack"))
    return true;

  return false;
}

/* True iff CALL is a call to a function representing a built-in variadic
   parameter pack.
*/

static bool
builtin_pack_call_p (tree call)
{
  if (TREE_CODE (call) != CALL_EXPR)
    return false;
  return builtin_pack_fn_p (CALL_EXPR_FN (call));
}

/* Return a TREE_VEC for the expansion of __integer_pack(HI).  */

static tree
expand_integer_pack (tree call, tree args, tsubst_flags_t complain,
		     tree in_decl)
{
  tree ohi = CALL_EXPR_ARG (call, 0);
  tree hi = tsubst_copy_and_build (ohi, args, complain, in_decl,
				   false/*fn*/, true/*int_cst*/);

  if (value_dependent_expression_p (hi))
    {
      /* Still dependent: rebuild the call with the substituted bound and
	 wrap it in a pack expansion to be expanded later.  */
      if (hi != ohi)
	{
	  call = copy_node (call);
	  CALL_EXPR_ARG (call, 0) = hi;
	}
      tree ex = make_pack_expansion (call, complain);
      tree vec = make_tree_vec (1);
      TREE_VEC_ELT (vec, 0) = ex;
      return vec;
    }
  else
    {
      hi = cxx_constant_value (hi);
      int len = valid_constant_size_p (hi) ? tree_to_shwi (hi) : -1;

      /* Calculate the largest value of len that won't make the size of the vec
	 overflow an int.  The compiler will exceed resource limits long before
	 this, but it seems a decent place to diagnose.  */
      int max = ((INT_MAX - sizeof (tree_vec)) / sizeof (tree)) + 1;

      if (len < 0 || len > max)
	{
	  if ((complain & tf_error)
	      && hi != error_mark_node)
	    error ("argument to %<__integer_pack%> must be between 0 and %d",
		   max);
	  return error_mark_node;
	}

      /* Expand to the vector {0, 1, ..., len-1}.  */
      tree vec = make_tree_vec (len);

      for (int i = 0; i < len; ++i)
	TREE_VEC_ELT (vec, i) = size_int (i);

      return vec;
    }
}

/* Return a TREE_VEC for the expansion of built-in template parameter pack
   CALL.  */

static tree
expand_builtin_pack_call (tree call, tree args, tsubst_flags_t complain,
			  tree in_decl)
{
  if (!builtin_pack_call_p (call))
    return NULL_TREE;

  tree fn = CALL_EXPR_FN (call);

  if (id_equal (DECL_NAME (fn), "__integer_pack"))
    return expand_integer_pack (call, args, complain, in_decl);

  return NULL_TREE;
}

/* Structure used to track the progress of find_parameter_packs_r.  */
struct find_parameter_pack_data
{
  /* TREE_LIST that will contain all of the parameter packs found by
     the traversal.  */
  tree* parameter_packs;

  /* Set of AST nodes that have been visited by the traversal.  */
  hash_set<tree> *visited;

  /* True iff we're making a type pack expansion.  */
  bool type_pack_expansion_p;
};

/* Identifies all of the argument packs that occur in a template
   argument and appends them to the TREE_LIST inside DATA, which is a
   find_parameter_pack_data structure.  This is a subroutine of
   make_pack_expansion and uses_parameter_packs.  */
static tree
find_parameter_packs_r (tree *tp, int *walk_subtrees, void* data)
{
  tree t = *tp;
  struct find_parameter_pack_data* ppd =
    (struct find_parameter_pack_data*)data;
  bool parameter_pack_p = false;

  /* Don't look through typedefs; we are interested in whether a
     parameter pack is actually written in the expression/type we're
     looking at, not the target type.  */
  if (TYPE_P (t) && typedef_variant_p (t))
    {
      /* But do look at arguments for an alias template.  */
      if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t))
	cp_walk_tree (&TI_ARGS (tinfo),
		      &find_parameter_packs_r,
		      ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Identify whether this is a parameter pack or not.  */
  switch (TREE_CODE (t))
    {
    case TEMPLATE_PARM_INDEX:
      if (TEMPLATE_PARM_PARAMETER_PACK (t))
	parameter_pack_p = true;
      break;

    case TEMPLATE_TYPE_PARM:
      t = TYPE_MAIN_VARIANT (t);
      /* FALLTHRU */
    case TEMPLATE_TEMPLATE_PARM:
      /* If the placeholder appears in the decl-specifier-seq of a function
	 parameter pack (14.6.3), or the type-specifier-seq of a type-id that
	 is a pack expansion, the invented template parameter is a template
	 parameter pack.  */
      if (ppd->type_pack_expansion_p && is_auto (t))
	TEMPLATE_TYPE_PARAMETER_PACK (t) = true;
      if (TEMPLATE_TYPE_PARAMETER_PACK (t))
	parameter_pack_p = true;
      break;

    case FIELD_DECL:
    case PARM_DECL:
      if (DECL_PACK_P (t))
	{
	  /* We don't want to walk into the type of a PARM_DECL,
	     because we don't want to see the type parameter pack.  */
	  *walk_subtrees = 0;
	  parameter_pack_p = true;
	}
      break;

    case VAR_DECL:
      if (DECL_PACK_P (t))
	{
	  /* We don't want to walk into the type of a variadic capture proxy,
	     because we don't want to see the type parameter pack.  */
	  *walk_subtrees = 0;
	  parameter_pack_p = true;
	}
      else if (variable_template_specialization_p (t))
	{
	  cp_walk_tree (&DECL_TI_ARGS (t),
			find_parameter_packs_r,
			ppd, ppd->visited);
	  *walk_subtrees = 0;
	}
      break;

    case CALL_EXPR:
      if (builtin_pack_call_p (t))
	parameter_pack_p = true;
      break;

    case BASES:
      parameter_pack_p = true;
      break;
    default:
      /* Not a parameter pack.  */
      break;
    }

  if (parameter_pack_p)
    {
      /* Add this parameter pack to the list.  */
      *ppd->parameter_packs = tree_cons (NULL_TREE, t, *ppd->parameter_packs);
    }

  if (TYPE_P (t))
    cp_walk_tree (&TYPE_CONTEXT (t),
		  &find_parameter_packs_r, ppd, ppd->visited);

  /* This switch statement will return immediately if we don't find a
     parameter pack.  ??? Should some of these be in cp_walk_subtrees?  */
  switch (TREE_CODE (t))
    {
    case BOUND_TEMPLATE_TEMPLATE_PARM:
      /* Check the template itself.  */
      cp_walk_tree (&TREE_TYPE (TYPE_TI_TEMPLATE (t)),
		    &find_parameter_packs_r, ppd, ppd->visited);
      return NULL_TREE;

    case DECL_EXPR:
      {
	tree decl = DECL_EXPR_DECL (t);
	/* Ignore the declaration of a capture proxy for a parameter pack.  */
	if (is_capture_proxy (decl))
	  *walk_subtrees = 0;
	if (is_typedef_decl (decl))
	  /* Since we stop at typedefs above, we need to look through them at
	     the point of the DECL_EXPR.  */
	  cp_walk_tree (&DECL_ORIGINAL_TYPE (decl),
			&find_parameter_packs_r, ppd, ppd->visited);
	return NULL_TREE;
      }

    case TEMPLATE_DECL:
      if (!DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	return NULL_TREE;
      cp_walk_tree (&TREE_TYPE (t),
		    &find_parameter_packs_r, ppd, ppd->visited);
      return NULL_TREE;

    case TYPE_PACK_EXPANSION:
    case EXPR_PACK_EXPANSION:
      /* Don't recurse into an expansion: its packs are already expanded.  */
      *walk_subtrees = 0;
      return NULL_TREE;

    case INTEGER_TYPE:
      cp_walk_tree (&TYPE_MAX_VALUE (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case IDENTIFIER_NODE:
      cp_walk_tree (&TREE_TYPE (t), &find_parameter_packs_r, ppd,
		    ppd->visited);
      *walk_subtrees = 0;
      return NULL_TREE;

    case LAMBDA_EXPR:
      {
	/* Since we defer implicit capture, look in the parms and body.  */
	tree fn = lambda_function (t);
	cp_walk_tree (&TREE_TYPE (fn), &find_parameter_packs_r, ppd,
		      ppd->visited);
	cp_walk_tree (&DECL_SAVED_TREE (fn), &find_parameter_packs_r, ppd,
		      ppd->visited);
	return NULL_TREE;
      }

    case DECLTYPE_TYPE:
      {
	/* When traversing a DECLTYPE_TYPE_EXPR, we need to set
	   type_pack_expansion_p to false so that any placeholders
	   within the expression don't get marked as parameter packs.  */
	bool type_pack_expansion_p = ppd->type_pack_expansion_p;
	ppd->type_pack_expansion_p = false;
	cp_walk_tree (&DECLTYPE_TYPE_EXPR (t), &find_parameter_packs_r,
		      ppd, ppd->visited);
	ppd->type_pack_expansion_p = type_pack_expansion_p;
	*walk_subtrees = 0;
	return NULL_TREE;
      }

    case IF_STMT:
      cp_walk_tree (&IF_COND (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      cp_walk_tree (&THEN_CLAUSE (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      cp_walk_tree (&ELSE_CLAUSE (t), &find_parameter_packs_r,
		    ppd, ppd->visited);
      /* Don't walk into IF_STMT_EXTRA_ARGS.  */
      *walk_subtrees = 0;
      return NULL_TREE;

    default:
      return NULL_TREE;
    }

  return NULL_TREE;
}

/* Determines if the expression or type T uses any parameter packs.
*/ tree uses_parameter_packs (tree t) { tree parameter_packs = NULL_TREE; struct find_parameter_pack_data ppd; ppd.parameter_packs = &parameter_packs; ppd.visited = new hash_set<tree>; ppd.type_pack_expansion_p = false; cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited); delete ppd.visited; return parameter_packs; } /* Turn ARG, which may be an expression, type, or a TREE_LIST representation a base-class initializer into a parameter pack expansion. If all goes well, the resulting node will be an EXPR_PACK_EXPANSION, TYPE_PACK_EXPANSION, or TREE_LIST, respectively. */ tree make_pack_expansion (tree arg, tsubst_flags_t complain) { tree result; tree parameter_packs = NULL_TREE; bool for_types = false; struct find_parameter_pack_data ppd; if (!arg || arg == error_mark_node) return arg; if (TREE_CODE (arg) == TREE_LIST && TREE_PURPOSE (arg)) { /* A TREE_LIST with a non-null TREE_PURPOSE is for a base class initializer. In this case, the TREE_PURPOSE will be a _TYPE node (representing the base class expansion we're initializing) and the TREE_VALUE will be a TREE_LIST containing the initialization arguments. The resulting expansion looks somewhat different from most expansions. Rather than returning just one _EXPANSION, we return a TREE_LIST whose TREE_PURPOSE is a TYPE_PACK_EXPANSION containing the bases that will be initialized. The TREE_VALUE will be identical to the original TREE_VALUE, which is a list of arguments that will be passed to each base. We do not introduce any new pack expansion nodes into the TREE_VALUE (although it is possible that some already exist), because the TREE_PURPOSE and TREE_VALUE all need to be expanded together with the same _EXPANSION node. Note that the TYPE_PACK_EXPANSION in the resulting TREE_PURPOSE will mention the parameter packs in both the bases and the arguments to the bases. */ tree purpose; tree value; tree parameter_packs = NULL_TREE; /* Determine which parameter packs will be used by the base class expansion. 
*/ ppd.visited = new hash_set<tree>; ppd.parameter_packs = &parameter_packs; ppd.type_pack_expansion_p = false; gcc_assert (TYPE_P (TREE_PURPOSE (arg))); cp_walk_tree (&TREE_PURPOSE (arg), &find_parameter_packs_r, &ppd, ppd.visited); if (parameter_packs == NULL_TREE) { if (complain & tf_error) error ("base initializer expansion %qT contains no parameter packs", arg); delete ppd.visited; return error_mark_node; } if (TREE_VALUE (arg) != void_type_node) { /* Collect the sets of parameter packs used in each of the initialization arguments. */ for (value = TREE_VALUE (arg); value; value = TREE_CHAIN (value)) { /* Determine which parameter packs will be expanded in this argument. */ cp_walk_tree (&TREE_VALUE (value), &find_parameter_packs_r, &ppd, ppd.visited); } } delete ppd.visited; /* Create the pack expansion type for the base type. */ purpose = cxx_make_type (TYPE_PACK_EXPANSION); SET_PACK_EXPANSION_PATTERN (purpose, TREE_PURPOSE (arg)); PACK_EXPANSION_PARAMETER_PACKS (purpose) = parameter_packs; PACK_EXPANSION_LOCAL_P (purpose) = at_function_scope_p (); /* Just use structural equality for these TYPE_PACK_EXPANSIONS; they will rarely be compared to anything. */ SET_TYPE_STRUCTURAL_EQUALITY (purpose); return tree_cons (purpose, TREE_VALUE (arg), NULL_TREE); } if (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL) for_types = true; /* Build the PACK_EXPANSION_* node. */ result = for_types ? cxx_make_type (TYPE_PACK_EXPANSION) : make_node (EXPR_PACK_EXPANSION); SET_PACK_EXPANSION_PATTERN (result, arg); if (TREE_CODE (result) == EXPR_PACK_EXPANSION) { /* Propagate type and const-expression information. */ TREE_TYPE (result) = TREE_TYPE (arg); TREE_CONSTANT (result) = TREE_CONSTANT (arg); /* Mark this read now, since the expansion might be length 0. */ mark_exp_read (arg); } else /* Just use structural equality for these TYPE_PACK_EXPANSIONS; they will rarely be compared to anything. 
*/
  SET_TYPE_STRUCTURAL_EQUALITY (result);

  /* Determine which parameter packs will be expanded.  */
  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = TYPE_P (arg);
  cp_walk_tree (&arg, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;

  /* Make sure we found some parameter packs.  */
  if (parameter_packs == NULL_TREE)
    {
      if (complain & tf_error)
	{
	  if (TYPE_P (arg))
	    error ("expansion pattern %qT contains no parameter packs", arg);
	  else
	    error ("expansion pattern %qE contains no parameter packs", arg);
	}
      return error_mark_node;
    }
  PACK_EXPANSION_PARAMETER_PACKS (result) = parameter_packs;

  PACK_EXPANSION_LOCAL_P (result) = at_function_scope_p ();

  return result;
}

/* Checks T for any "bare" parameter packs, which have not yet been
   expanded, and issues an error if any are found. This operation can
   only be done on full expressions or types (e.g., an expression
   statement, "if" condition, etc.), because we could have expressions like:

     foo(f(g(h(args)))...)

   where "args" is a parameter pack. check_for_bare_parameter_packs
   should not be called for the subexpressions args, h(args),
   g(h(args)), or f(g(h(args))), because we would produce erroneous
   error messages.

   Returns TRUE and emits an error if there were bare parameter packs,
   returns FALSE otherwise.  */
bool
check_for_bare_parameter_packs (tree t, location_t loc /* = UNKNOWN_LOCATION */)
{
  tree parameter_packs = NULL_TREE;
  struct find_parameter_pack_data ppd;

  /* Bare packs can only occur while a template is being parsed; nothing
     to diagnose otherwise.  */
  if (!processing_template_decl || !t || t == error_mark_node)
    return false;

  /* Check a TYPE_DECL through its underlying type.  */
  if (TREE_CODE (t) == TYPE_DECL)
    t = TREE_TYPE (t);

  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = false;
  cp_walk_tree (&t, &find_parameter_packs_r, &ppd, ppd.visited);
  delete ppd.visited;

  /* It's OK for a lambda to have an unexpanded parameter pack from the
     containing context, but do complain about unexpanded capture packs.  */
  if (current_class_type && LAMBDA_TYPE_P (current_class_type)
      && CLASSTYPE_TEMPLATE_INFO (current_class_type))
    for (; parameter_packs; parameter_packs = TREE_CHAIN (parameter_packs))
      {
	tree pack = TREE_VALUE (parameter_packs);
	if (is_capture_proxy (pack))
	  break;
      }

  if (parameter_packs)
    {
      if (loc == UNKNOWN_LOCATION)
	loc = cp_expr_loc_or_input_loc (t);
      error_at (loc, "parameter packs not expanded with %<...%>:");
      /* List each offending pack by name, as a note under the error.  */
      while (parameter_packs)
	{
	  tree pack = TREE_VALUE (parameter_packs);
	  tree name = NULL_TREE;

	  if (TREE_CODE (pack) == TEMPLATE_TYPE_PARM
	      || TREE_CODE (pack) == TEMPLATE_TEMPLATE_PARM)
	    name = TYPE_NAME (pack);
	  else if (TREE_CODE (pack) == TEMPLATE_PARM_INDEX)
	    name = DECL_NAME (TEMPLATE_PARM_DECL (pack));
	  else if (TREE_CODE (pack) == CALL_EXPR)
	    name = DECL_NAME (CALL_EXPR_FN (pack));
	  else
	    name = DECL_NAME (pack);

	  if (name)
	    inform (loc, " %qD", name);
	  else
	    inform (loc, " %s", "<anonymous>");

	  parameter_packs = TREE_CHAIN (parameter_packs);
	}

      return true;
    }

  return false;
}

/* Expand any parameter packs that occur in the template arguments in
   ARGS.  */

tree
expand_template_argument_pack (tree args)
{
  if (args == error_mark_node)
    return error_mark_node;

  tree result_args = NULL_TREE;
  int in_arg, out_arg = 0, nargs = args ? TREE_VEC_LENGTH (args) : 0;
  int num_result_args = -1;
  int non_default_args_count = -1;

  /* First, determine if we need to expand anything, and the number of
     slots we'll need.  num_result_args stays -1 while no pack has been
     seen, so it doubles as the "expansion needed" flag.  */
  for (in_arg = 0; in_arg < nargs; ++in_arg)
    {
      tree arg = TREE_VEC_ELT (args, in_arg);
      if (arg == NULL_TREE)
	return args;
      if (ARGUMENT_PACK_P (arg))
	{
	  int num_packed = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg));
	  if (num_result_args < 0)
	    num_result_args = in_arg + num_packed;
	  else
	    num_result_args += num_packed;
	}
      else
	{
	  if (num_result_args >= 0)
	    num_result_args++;
	}
    }

  /* If no expansion is necessary, we're done.  */
  if (num_result_args < 0)
    return args;

  /* Expand arguments.  */
  result_args = make_tree_vec (num_result_args);
  if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (args))
    non_default_args_count =
      GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (args);
  for (in_arg = 0; in_arg < nargs; ++in_arg)
    {
      tree arg = TREE_VEC_ELT (args, in_arg);
      if (ARGUMENT_PACK_P (arg))
	{
	  /* Splice the pack's elements in place of the single pack slot.  */
	  tree packed = ARGUMENT_PACK_ARGS (arg);
	  int i, num_packed = TREE_VEC_LENGTH (packed);
	  for (i = 0; i < num_packed; ++i, ++out_arg)
	    TREE_VEC_ELT (result_args, out_arg) = TREE_VEC_ELT(packed, i);
	  if (non_default_args_count > 0)
	    non_default_args_count += num_packed - 1;
	}
      else
	{
	  TREE_VEC_ELT (result_args, out_arg) = arg;
	  ++out_arg;
	}
    }
  if (non_default_args_count >= 0)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (result_args,
					 non_default_args_count);
  return result_args;
}

/* Checks if DECL shadows a template parameter.

   [temp.local]: A template-parameter shall not be redeclared within its
   scope (including nested scopes).

   Emits an error and returns FALSE if the DECL shadows a template
   parameter, returns TRUE otherwise.  (Note: TRUE means "no shadowing
   problem" — see the early returns below.)  */

bool
check_template_shadow (tree decl)
{
  tree olddecl;

  /* If we're not in a template, we can't possibly shadow a template
     parameter.  */
  if (!current_template_parms)
    return true;

  /* Figure out what we're shadowing.  */
  decl = OVL_FIRST (decl);
  olddecl = innermost_non_namespace_value (DECL_NAME (decl));

  /* If there's no previous binding for this name, we're not shadowing
     anything, let alone a template parameter.  */
  if (!olddecl)
    return true;

  /* If we're not shadowing a template parameter, we're done.  Note
     that OLDDECL might be an OVERLOAD (or perhaps even an
     ERROR_MARK), so we can't just blithely assume it to be a _DECL
     node.  */
  if (!DECL_P (olddecl) || !DECL_TEMPLATE_PARM_P (olddecl))
    return true;

  /* We check for decl != olddecl to avoid bogus errors for using a
     name inside a class.  We check TPFI to avoid duplicate errors for
     inline member templates.
*/
  if (decl == olddecl
      || (DECL_TEMPLATE_PARM_P (decl)
	  && TEMPLATE_PARMS_FOR_INLINE (current_template_parms)))
    return true;

  /* Don't complain about the injected class name, as we've already
     complained about the class itself.  */
  if (DECL_SELF_REFERENCE_P (decl))
    return false;

  if (DECL_TEMPLATE_PARM_P (decl))
    error ("declaration of template parameter %q+D shadows "
	   "template parameter", decl);
  else
    error ("declaration of %q+#D shadows template parameter", decl);
  inform (DECL_SOURCE_LOCATION (olddecl),
	  "template parameter %qD declared here", olddecl);
  return false;
}

/* Return a new TEMPLATE_PARM_INDEX with the indicated INDEX, LEVEL,
   ORIG_LEVEL, DECL, and TYPE.  */

static tree
build_template_parm_index (int index,
			   int level,
			   int orig_level,
			   tree decl,
			   tree type)
{
  tree t = make_node (TEMPLATE_PARM_INDEX);
  TEMPLATE_PARM_IDX (t) = index;
  TEMPLATE_PARM_LEVEL (t) = level;
  TEMPLATE_PARM_ORIG_LEVEL (t) = orig_level;
  TEMPLATE_PARM_DECL (t) = decl;
  TREE_TYPE (t) = type;
  /* Mirror the decl's constness/readonly-ness on the index node.  */
  TREE_CONSTANT (t) = TREE_CONSTANT (decl);
  TREE_READONLY (t) = TREE_READONLY (decl);

  return t;
}

/* Find the canonical type parameter for the given template type
   parameter.  Returns the canonical type parameter, which may be TYPE
   if no such parameter existed.  */

static tree
canonical_type_parameter (tree type)
{
  /* canonical_template_parms is indexed by the parameter's index; each
     slot holds a TREE_LIST chain of structurally-distinct parameters
     sharing that index.  */
  int idx = TEMPLATE_TYPE_IDX (type);

  gcc_assert (TREE_CODE (type) != TEMPLATE_TEMPLATE_PARM);

  if (vec_safe_length (canonical_template_parms) <= (unsigned) idx)
    vec_safe_grow_cleared (canonical_template_parms, idx + 1, true);

  for (tree list = (*canonical_template_parms)[idx];
       list; list = TREE_CHAIN (list))
    if (comptypes (type, TREE_VALUE (list), COMPARE_STRUCTURAL))
      return TREE_VALUE (list);

  /* Not seen before: TYPE itself becomes the canonical parameter.  */
  (*canonical_template_parms)[idx]
    = tree_cons (NULL_TREE, type, (*canonical_template_parms)[idx]);

  return type;
}

/* Return a TEMPLATE_PARM_INDEX, similar to INDEX, but whose
   TEMPLATE_PARM_LEVEL has been decreased by LEVELS.  If such a
   TEMPLATE_PARM_INDEX already exists, it is returned; otherwise, a
   new one is created.
*/
static tree
reduce_template_parm_level (tree index, tree type, int levels, tree args,
			    tsubst_flags_t complain)
{
  /* Reuse the cached descendant only if it is exactly LEVELS below INDEX
     and has the same type; otherwise build a fresh decl and index.  */
  if (TEMPLATE_PARM_DESCENDANTS (index) == NULL_TREE
      || (TEMPLATE_PARM_LEVEL (TEMPLATE_PARM_DESCENDANTS (index))
	  != TEMPLATE_PARM_LEVEL (index) - levels)
      || !same_type_p (type, TREE_TYPE (TEMPLATE_PARM_DESCENDANTS (index))))
    {
      tree orig_decl = TEMPLATE_PARM_DECL (index);

      tree decl = build_decl (DECL_SOURCE_LOCATION (orig_decl),
			      TREE_CODE (orig_decl), DECL_NAME (orig_decl),
			      type);
      TREE_CONSTANT (decl) = TREE_CONSTANT (orig_decl);
      TREE_READONLY (decl) = TREE_READONLY (orig_decl);
      DECL_VIRTUAL_P (decl) = DECL_VIRTUAL_P (orig_decl);
      DECL_ARTIFICIAL (decl) = 1;
      SET_DECL_TEMPLATE_PARM_P (decl);

      tree tpi = build_template_parm_index (TEMPLATE_PARM_IDX (index),
					    TEMPLATE_PARM_LEVEL (index) - levels,
					    TEMPLATE_PARM_ORIG_LEVEL (index),
					    decl, type);
      /* Cache the lowered index for subsequent calls.  */
      TEMPLATE_PARM_DESCENDANTS (index) = tpi;
      TEMPLATE_PARM_PARAMETER_PACK (tpi)
	= TEMPLATE_PARM_PARAMETER_PACK (index);

      /* Template template parameters need this.  */
      tree inner = decl;
      if (TREE_CODE (decl) == TEMPLATE_DECL)
	{
	  inner = build_decl (DECL_SOURCE_LOCATION (decl),
			      TYPE_DECL, DECL_NAME (decl), type);
	  DECL_TEMPLATE_RESULT (decl) = inner;
	  DECL_ARTIFICIAL (inner) = true;
	  /* Substitute ARGS into the nested parameter list as well.  */
	  DECL_TEMPLATE_PARMS (decl)
	    = tsubst_template_parms (DECL_TEMPLATE_PARMS (orig_decl),
				     args, complain);
	}

      /* Attach the TPI to the decl.  */
      if (TREE_CODE (inner) == TYPE_DECL)
	TEMPLATE_TYPE_PARM_INDEX (type) = tpi;
      else
	DECL_INITIAL (decl) = tpi;
    }

  return TEMPLATE_PARM_DESCENDANTS (index);
}

/* Process information from new template parameter PARM and append it
   to the LIST being built.  This new parameter is a non-type parameter
   iff IS_NON_TYPE is true.  This new parameter is a parameter pack iff
   IS_PARAMETER_PACK is true.  The location of PARM is in PARM_LOC.
*/

tree
process_template_parm (tree list, location_t parm_loc, tree parm,
		       bool is_non_type, bool is_parameter_pack)
{
  gcc_assert (TREE_CODE (parm) == TREE_LIST);

  /* Compute the next parameter index by looking at the last parameter
     already on LIST (type/template parms keep the index in the type,
     non-type parms in DECL_INITIAL).  */
  tree prev = NULL_TREE;
  int idx = 0;
  if (list)
    {
      prev = tree_last (list);

      tree p = TREE_VALUE (prev);
      if (TREE_CODE (p) == TYPE_DECL || TREE_CODE (p) == TEMPLATE_DECL)
	idx = TEMPLATE_TYPE_IDX (TREE_TYPE (p));
      else if (TREE_CODE (p) == PARM_DECL)
	idx = TEMPLATE_PARM_IDX (DECL_INITIAL (p));

      ++idx;
    }

  tree decl = NULL_TREE;
  tree defval = TREE_PURPOSE (parm);
  tree constr = TREE_TYPE (parm);

  if (is_non_type)
    {
      parm = TREE_VALUE (parm);

      SET_DECL_TEMPLATE_PARM_P (parm);

      if (TREE_TYPE (parm) != error_mark_node)
	{
	  /* [temp.param]

	     The top-level cv-qualifiers on the template-parameter are
	     ignored when determining its type.  */
	  TREE_TYPE (parm) = TYPE_MAIN_VARIANT (TREE_TYPE (parm));
	  if (invalid_nontype_parm_type_p (TREE_TYPE (parm), 1))
	    TREE_TYPE (parm) = error_mark_node;
	  else if (uses_parameter_packs (TREE_TYPE (parm))
		   && !is_parameter_pack
		   /* If we're in a nested template parameter list, the template
		      template parameter could be a parameter pack.  */
		   && processing_template_parmlist == 1)
	    {
	      /* This template parameter is not a parameter pack, but it
		 should be.  Complain about "bare" parameter packs.  */
	      check_for_bare_parameter_packs (TREE_TYPE (parm));

	      /* Recover by calling this a parameter pack.  */
	      is_parameter_pack = true;
	    }
	}

      /* A template parameter is not modifiable.  */
      TREE_CONSTANT (parm) = 1;
      TREE_READONLY (parm) = 1;
      /* The CONST_DECL is what the parameter's index refers to; PARM
	 itself stays a PARM_DECL.  */
      decl = build_decl (parm_loc,
			 CONST_DECL, DECL_NAME (parm), TREE_TYPE (parm));
      TREE_CONSTANT (decl) = 1;
      TREE_READONLY (decl) = 1;
      DECL_INITIAL (parm) = DECL_INITIAL (decl)
	= build_template_parm_index (idx, processing_template_decl,
				     processing_template_decl,
				     decl, TREE_TYPE (parm));

      TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)) = is_parameter_pack;
    }
  else
    {
      tree t;
      parm = TREE_VALUE (TREE_VALUE (parm));

      if (parm && TREE_CODE (parm) == TEMPLATE_DECL)
	{
	  t = cxx_make_type (TEMPLATE_TEMPLATE_PARM);
	  /* This is for distinguishing between real templates and template
	     template parameters */
	  TREE_TYPE (parm) = t;

	  /* any_template_parm_r expects to be able to get the targs of a
	     DECL_TEMPLATE_RESULT.  */
	  tree result = DECL_TEMPLATE_RESULT (parm);
	  TREE_TYPE (result) = t;
	  tree args = template_parms_to_args (DECL_TEMPLATE_PARMS (parm));
	  tree tinfo = build_template_info (parm, args);
	  retrofit_lang_decl (result);
	  DECL_TEMPLATE_INFO (result) = tinfo;

	  decl = parm;
	}
      else
	{
	  t = cxx_make_type (TEMPLATE_TYPE_PARM);
	  /* parm is either IDENTIFIER_NODE or NULL_TREE.  */
	  decl = build_decl (parm_loc, TYPE_DECL, parm, t);
	}

      TYPE_NAME (t) = decl;
      TYPE_STUB_DECL (t) = decl;
      parm = decl;
      TEMPLATE_TYPE_PARM_INDEX (t)
	= build_template_parm_index (idx, processing_template_decl,
				     processing_template_decl,
				     decl, TREE_TYPE (parm));
      TEMPLATE_TYPE_PARAMETER_PACK (t) = is_parameter_pack;
      /* Template template parms have no canonical type; plain type parms
	 get the canonical representative for their index.  */
      if (TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM)
	SET_TYPE_STRUCTURAL_EQUALITY (t);
      else
	TYPE_CANONICAL (t) = canonical_type_parameter (t);
    }
  DECL_ARTIFICIAL (decl) = 1;
  SET_DECL_TEMPLATE_PARM_P (decl);

  /* Build requirements for the type/template parameter.
     This must be done after SET_DECL_TEMPLATE_PARM_P or
     process_template_parm could fail.
*/
  tree reqs = finish_shorthand_constraint (parm, constr);

  decl = pushdecl (decl);
  if (!is_non_type)
    parm = decl;

  /* Build the parameter node linking the parameter declaration,
     its default argument (if any), and its constraints (if any).  */
  parm = build_tree_list (defval, parm);
  TEMPLATE_PARM_CONSTRAINTS (parm) = reqs;

  /* Append PARM to LIST (which may be empty).  */
  if (prev)
    TREE_CHAIN (prev) = parm;
  else
    list = parm;

  return list;
}

/* The end of a template parameter list has been reached.  Process the
   tree list into a parameter vector, converting each parameter into a more
   useful form.	 Type parameters are saved as IDENTIFIER_NODEs, and others
   as PARM_DECLs.  */

tree
end_template_parm_list (tree parms)
{
  tree saved_parmlist = make_tree_vec (list_length (parms));

  /* Pop the dummy parameter level and add the real one.  We do not
     morph the dummy parameter in place, as it might have been captured by
     a (nested) template-template-parm.  */
  current_template_parms = TREE_CHAIN (current_template_parms);

  current_template_parms
    = tree_cons (size_int (processing_template_decl),
		 saved_parmlist, current_template_parms);

  /* Move each TREE_LIST node of PARMS into a slot of the TREE_VEC,
     severing the chain links as we go.  */
  for (unsigned ix = 0; parms; ix++)
    {
      tree parm = parms;
      parms = TREE_CHAIN (parms);
      TREE_CHAIN (parm) = NULL_TREE;

      TREE_VEC_ELT (saved_parmlist, ix) = parm;
    }

  --processing_template_parmlist;

  return saved_parmlist;
}

// Explicitly indicate the end of the template parameter list. We assume
// that the current template parameters have been constructed and/or
// managed explicitly, as when creating new template template parameters
// from a shorthand constraint.
void
end_template_parm_list ()
{
  --processing_template_parmlist;
}

/* end_template_decl is called after a template declaration is seen.  */

void
end_template_decl (void)
{
  reset_specialization ();

  if (! processing_template_decl)
    return;

  /* This matches the pushlevel in begin_template_parm_list.
*/
  finish_scope ();

  --processing_template_decl;
  current_template_parms = TREE_CHAIN (current_template_parms);
}

/* Takes a TEMPLATE_PARM_P or DECL_TEMPLATE_PARM_P node or a
   TREE_LIST thereof, and converts it into an argument suitable to be
   passed to the type substitution functions.  Note that if the
   TREE_LIST contains an error_mark node, the returned argument is
   error_mark_node.  */

tree
template_parm_to_arg (tree t)
{
  if (!t)
    return NULL_TREE;

  if (TREE_CODE (t) == TREE_LIST)
    t = TREE_VALUE (t);

  if (error_operand_p (t))
    return error_mark_node;

  /* Strip the parameter decl down to its TEMPLATE_PARM_INDEX or
     parameter type.  */
  if (DECL_P (t) && DECL_TEMPLATE_PARM_P (t))
    {
      if (TREE_CODE (t) == TYPE_DECL
	  || TREE_CODE (t) == TEMPLATE_DECL)
	t = TREE_TYPE (t);
      else
	t = DECL_INITIAL (t);
    }

  gcc_assert (TEMPLATE_PARM_P (t));

  if (TREE_CODE (t) == TEMPLATE_TYPE_PARM
      || TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM)
    {
      if (TEMPLATE_TYPE_PARAMETER_PACK (t))
	{
	  /* Turn this argument into a TYPE_ARGUMENT_PACK
	     with a single element, which expands T.  */
	  tree vec = make_tree_vec (1);
	  if (CHECKING_P)
	    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));

	  TREE_VEC_ELT (vec, 0) = make_pack_expansion (t);

	  t = cxx_make_type (TYPE_ARGUMENT_PACK);
	  SET_ARGUMENT_PACK_ARGS (t, vec);
	}
    }
  else
    {
      if (TEMPLATE_PARM_PARAMETER_PACK (t))
	{
	  /* Turn this argument into a NONTYPE_ARGUMENT_PACK
	     with a single element, which expands T.  */
	  tree vec = make_tree_vec (1);
	  if (CHECKING_P)
	    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));

	  t = convert_from_reference (t);
	  TREE_VEC_ELT (vec, 0) = make_pack_expansion (t);

	  t  = make_node (NONTYPE_ARGUMENT_PACK);
	  SET_ARGUMENT_PACK_ARGS (t, vec);
	}
      else
	t = convert_from_reference (t);
    }
  return t;
}

/* Given a single level of template parameters (a TREE_VEC), return it as
   a set of template arguments.
*/

tree
template_parms_level_to_args (tree parms)
{
  /* Work on a copy; TREE_TYPE may carry level bookkeeping we must not
     propagate into the argument vector.  */
  tree a = copy_node (parms);
  TREE_TYPE (a) = NULL_TREE;
  for (int i = TREE_VEC_LENGTH (a) - 1; i >= 0; --i)
    TREE_VEC_ELT (a, i) = template_parm_to_arg (TREE_VEC_ELT (a, i));

  if (CHECKING_P)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (a, TREE_VEC_LENGTH (a));

  return a;
}

/* Given a set of template parameters, return them as a set of template
   arguments.  The template parameters are represented as a TREE_VEC, in
   the form documented in cp-tree.h for template arguments.  */

tree
template_parms_to_args (tree parms)
{
  tree header;
  tree args = NULL_TREE;
  int length = TMPL_PARMS_DEPTH (parms);
  int l = length;

  /* If there is only one level of template parameters, we do not
     create a TREE_VEC of TREE_VECs.  Instead, we return a single
     TREE_VEC containing the arguments.  */
  if (length > 1)
    args = make_tree_vec (length);

  /* PARMS is chained innermost-first; fill ARGS outermost-first by
     decrementing L.  */
  for (header = parms; header; header = TREE_CHAIN (header))
    {
      tree a = template_parms_level_to_args (TREE_VALUE (header));

      if (length > 1)
	TREE_VEC_ELT (args, --l) = a;
      else
	args = a;
    }

  return args;
}

/* Within the declaration of a template, return the currently active
   template parameters as an argument TREE_VEC.  */

static tree
current_template_args (void)
{
  return template_parms_to_args (current_template_parms);
}

/* Return the fully generic arguments for TMPL, i.e. what
   current_template_args would be while parsing it.  */

tree
generic_targs_for (tree tmpl)
{
  if (tmpl == NULL_TREE)
    return NULL_TREE;
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl)
      || DECL_TEMPLATE_SPECIALIZATION (tmpl))
    /* DECL_TEMPLATE_RESULT doesn't have the arguments we want.  For a template
       template parameter, it has no TEMPLATE_INFO; for a partial
       specialization, it has the arguments for the primary template, and we
       want the arguments for the partial specialization.
*/;
  else if (tree result = DECL_TEMPLATE_RESULT (tmpl))
    if (tree ti = get_template_info (result))
      return TI_ARGS (ti);
  /* Fall back to rebuilding the arguments from the parameter list.  */
  return template_parms_to_args (DECL_TEMPLATE_PARMS (tmpl));
}

/* Update the declared TYPE by doing any lookups which were thought to be
   dependent, but are not now that we know the SCOPE of the declarator.  */

tree
maybe_update_decl_type (tree orig_type, tree scope)
{
  tree type = orig_type;

  if (type == NULL_TREE)
    return type;

  if (TREE_CODE (orig_type) == TYPE_DECL)
    type = TREE_TYPE (type);

  if (scope && TYPE_P (scope) && dependent_type_p (scope)
      && dependent_type_p (type)
      /* Don't bother building up the args in this case.  */
      && TREE_CODE (type) != TEMPLATE_TYPE_PARM)
    {
      /* tsubst in the args corresponding to the template parameters,
	 including auto if present.  Most things will be unchanged, but
	 make_typename_type and tsubst_qualified_id will resolve
	 TYPENAME_TYPEs and SCOPE_REFs that were previously dependent.  */
      tree args = current_template_args ();
      tree auto_node = type_uses_auto (type);
      tree pushed;
      if (auto_node)
	{
	  /* Add an extra innermost level holding the auto so tsubst can
	     substitute it like an ordinary parameter.  */
	  tree auto_vec = make_tree_vec (1);
	  TREE_VEC_ELT (auto_vec, 0) = auto_node;
	  args = add_to_template_args (args, auto_vec);
	}
      pushed = push_scope (scope);
      type = tsubst (type, args, tf_warning_or_error, NULL_TREE);
      if (pushed)
	pop_scope (scope);
    }

  if (type == error_mark_node)
    return orig_type;

  if (TREE_CODE (orig_type) == TYPE_DECL)
    {
      if (same_type_p (type, TREE_TYPE (orig_type)))
	type = orig_type;
      else
	type = TYPE_NAME (type);
    }
  return type;
}

/* Return a TEMPLATE_DECL corresponding to DECL, using the indicated
   template PARMS and constraints, CONSTR.  If MEMBER_TEMPLATE_P is true,
   the new template is a member template.
*/

static tree
build_template_decl (tree decl, tree parms, bool member_template_p)
{
  tree tmpl = build_lang_decl (TEMPLATE_DECL, DECL_NAME (decl), NULL_TREE);
  SET_DECL_LANGUAGE (tmpl, DECL_LANGUAGE (decl));
  DECL_TEMPLATE_PARMS (tmpl) = parms;
  DECL_TEMPLATE_RESULT (tmpl) = decl;
  DECL_CONTEXT (tmpl) = DECL_CONTEXT (decl);
  TREE_TYPE (tmpl) = TREE_TYPE (decl);
  DECL_SOURCE_LOCATION (tmpl) = DECL_SOURCE_LOCATION (decl);
  DECL_MEMBER_TEMPLATE_P (tmpl) = member_template_p;

  return tmpl;
}

/* Scratch state threaded through for_each_template_parm when checking
   which template parameters a partial specialization uses.  */
struct template_parm_data
{
  /* The level of the template parameters we are currently
     processing.  */
  int level;

  /* The index of the specialization argument we are currently
     processing.  */
  int current_arg;

  /* An array whose size is the number of template parameters.  The
     elements are nonzero if the parameter has been used in any one
     of the arguments processed so far.  */
  int* parms;

  /* An array whose size is the number of template arguments.  The
     elements are nonzero if the argument makes use of template
     parameters of this level.  */
  int* arg_uses_template_parms;
};

/* Subroutine of push_template_decl used to see if each template
   parameter in a partial specialization is used in the explicit
   argument list.  If T is of the LEVEL given in DATA (which is
   treated as a template_parm_data*), then DATA->PARMS is marked
   appropriately.  */

static int
mark_template_parm (tree t, void* data)
{
  int level;
  int idx;
  struct template_parm_data* tpd = (struct template_parm_data*) data;

  template_parm_level_and_index (t, &level, &idx);

  if (level == tpd->level)
    {
      tpd->parms[idx] = 1;
      tpd->arg_uses_template_parms[tpd->current_arg] = 1;
    }

  /* In C++17 the type of a non-type argument is a deduced context.  */
  if (cxx_dialect >= cxx17
      && TREE_CODE (t) == TEMPLATE_PARM_INDEX)
    for_each_template_parm (TREE_TYPE (t),
			    &mark_template_parm,
			    data,
			    NULL,
			    /*include_nondeduced_p=*/false);

  /* Return zero so that for_each_template_parm will continue the
     traversal of the tree; we want to mark *every* template parm.
*/
  return 0;
}

/* Process the partial specialization DECL.  */

static tree
process_partial_specialization (tree decl)
{
  tree type = TREE_TYPE (decl);
  tree tinfo = get_template_info (decl);
  tree maintmpl = TI_TEMPLATE (tinfo);
  tree specargs = TI_ARGS (tinfo);
  tree inner_args = INNERMOST_TEMPLATE_ARGS (specargs);
  tree main_inner_parms = DECL_INNERMOST_TEMPLATE_PARMS (maintmpl);
  tree inner_parms;
  tree inst;
  int nargs = TREE_VEC_LENGTH (inner_args);
  int ntparms;
  int  i;
  bool did_error_intro = false;
  struct template_parm_data tpd;
  struct template_parm_data tpd2;

  gcc_assert (current_template_parms);

  /* A concept cannot be specialized.  */
  if (flag_concepts && variable_concept_p (maintmpl))
    {
      error ("specialization of variable concept %q#D", maintmpl);
      return error_mark_node;
    }

  inner_parms = INNERMOST_TEMPLATE_PARMS (current_template_parms);
  ntparms = TREE_VEC_LENGTH (inner_parms);

  /* We check that each of the template parameters given in the
     partial specialization is used in the argument list to the
     specialization.  For example:

       template <class T> struct S;
       template <class T> struct S<T*>;

     The second declaration is OK because `T*' uses the template
     parameter T, whereas

       template <class T> struct S<int>;

     is no good.  Even trickier is:

       template <class T>
       struct S1
       {
	  template <class U>
	  struct S2;
	  template <class U>
	  struct S2<T>;
       };

     The S2<T> declaration is actually invalid; it is a
     full-specialization.  Of course,

	  template <class U>
	  struct S2<T (*)(U)>;

     or some such would have been OK.  */
  tpd.level = TMPL_PARMS_DEPTH (current_template_parms);
  tpd.parms = XALLOCAVEC (int, ntparms);
  memset (tpd.parms, 0, sizeof (int) * ntparms);

  tpd.arg_uses_template_parms = XALLOCAVEC (int, nargs);
  memset (tpd.arg_uses_template_parms, 0, sizeof (int) * nargs);
  /* Mark, for each specialization argument, which template parameters
     it deduces.  */
  for (i = 0; i < nargs; ++i)
    {
      tpd.current_arg = i;
      for_each_template_parm (TREE_VEC_ELT (inner_args, i),
			      &mark_template_parm,
			      &tpd,
			      NULL,
			      /*include_nondeduced_p=*/false);
    }
  for (i = 0; i < ntparms; ++i)
    if (tpd.parms[i] == 0)
      {
	/* One of the template parms was not used in a deduced context in the
	   specialization.  */
	if (!did_error_intro)
	  {
	    error ("template parameters not deducible in "
		   "partial specialization:");
	    did_error_intro = true;
	  }

	inform (input_location, " %qD",
		TREE_VALUE (TREE_VEC_ELT (inner_parms, i)));
      }

  if (did_error_intro)
    return error_mark_node;

  /* [temp.class.spec]

     The argument list of the specialization shall not be identical to
     the implicit argument list of the primary template.  */
  tree main_args
    = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (maintmpl)));
  if (comp_template_args (inner_args, INNERMOST_TEMPLATE_ARGS (main_args))
      && (!flag_concepts
	  || !strictly_subsumes (current_template_constraints (),
				 main_args, maintmpl)))
    {
      if (!flag_concepts)
	error ("partial specialization %q+D does not specialize "
	       "any template arguments; to define the primary template, "
	       "remove the template argument list", decl);
      else
	error ("partial specialization %q+D does not specialize any "
	       "template arguments and is not more constrained than "
	       "the primary template; to define the primary template, "
	       "remove the template argument list", decl);
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
    }

  /* A partial specialization that replaces multiple parameters of the
     primary template with a pack expansion is less specialized for those
     parameters.  */
  if (nargs < DECL_NTPARMS (maintmpl))
    {
      error ("partial specialization is not more specialized than the "
	     "primary template because it replaces multiple parameters "
	     "with a pack expansion");
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
      /* Avoid crash in process_partial_specialization.  */
      return decl;
    }
  else if (nargs > DECL_NTPARMS (maintmpl))
    {
      error ("too many arguments for partial specialization %qT", type);
      inform (DECL_SOURCE_LOCATION (maintmpl), "primary template here");
      /* Avoid crash below.  */
      return decl;
    }

  /* If we aren't in a dependent class, we can actually try deduction.  */
  else if (tpd.level == 1
	   /* FIXME we should be able to handle a partial specialization of a
	      partial instantiation, but currently we can't (c++/41727).  */
	   && TMPL_ARGS_DEPTH (specargs) == 1
	   && !get_partial_spec_bindings (maintmpl, maintmpl, specargs))
    {
      auto_diagnostic_group d;
      if (permerror (input_location, "partial specialization %qD is not "
		     "more specialized than", decl))
	inform (DECL_SOURCE_LOCATION (maintmpl), "primary template %qD",
		maintmpl);
    }

  /* [temp.class.spec]

     A partially specialized non-type argument expression shall not
     involve template parameters of the partial specialization except
     when the argument expression is a simple identifier.

     The type of a template parameter corresponding to a specialized
     non-type argument shall not be dependent on a parameter of the
     specialization.

     Also, we verify that pack expansions only occur at the
     end of the argument list.  */
  tpd2.parms = 0;
  for (i = 0; i < nargs; ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (main_inner_parms, i));
      tree arg = TREE_VEC_ELT (inner_args, i);
      tree packed_args = NULL_TREE;
      int j, len = 1;

      if (ARGUMENT_PACK_P (arg))
	{
	  /* Extract the arguments from the argument pack. We'll be
	     iterating over these in the following loop.  */
	  packed_args = ARGUMENT_PACK_ARGS (arg);
	  len = TREE_VEC_LENGTH (packed_args);
	}

      for (j = 0; j < len; j++)
	{
	  if (packed_args)
	    /* Get the Jth argument in the parameter pack.  */
	    arg = TREE_VEC_ELT (packed_args, j);

	  if (PACK_EXPANSION_P (arg))
	    {
	      /* Pack expansions must come at the end of the
		 argument list.  */
	      if ((packed_args && j < len - 1)
		  || (!packed_args && i < nargs - 1))
		{
		  if (TREE_CODE (arg) == EXPR_PACK_EXPANSION)
		    error ("parameter pack argument %qE must be at the "
			   "end of the template argument list", arg);
		  else
		    error ("parameter pack argument %qT must be at the "
			   "end of the template argument list", arg);
		}
	    }

	  if (TREE_CODE (arg) == EXPR_PACK_EXPANSION)
	    /* We only care about the pattern.  */
	    arg = PACK_EXPANSION_PATTERN (arg);

	  if (/* These first two lines are the `non-type' bit.  */
	      !TYPE_P (arg)
	      && TREE_CODE (arg) != TEMPLATE_DECL
	      /* These next two lines are the `argument expression is not just a
		 simple identifier' condition and also the `specialized
		 non-type argument' bit.  */
	      && TREE_CODE (arg) != TEMPLATE_PARM_INDEX
	      && !((REFERENCE_REF_P (arg)
		    || TREE_CODE (arg) == VIEW_CONVERT_EXPR)
		   && TREE_CODE (TREE_OPERAND (arg, 0)) == TEMPLATE_PARM_INDEX))
	    {
	      if ((!packed_args && tpd.arg_uses_template_parms[i])
		  || (packed_args && uses_template_parms (arg)))
		error_at (cp_expr_loc_or_input_loc (arg),
			  "template argument %qE involves template "
			  "parameter(s)", arg);
	      else
		{
		  /* Look at the corresponding template parameter,
		     marking which template parameters its type depends
		     upon.  */
		  tree type = TREE_TYPE (parm);

		  if (!tpd2.parms)
		    {
		      /* We haven't yet initialized TPD2.  Do so now.  */
		      tpd2.arg_uses_template_parms = XALLOCAVEC (int, nargs);
		      /* The number of parameters here is the number in the
			 main template, which, as checked in the assertion
			 above, is NARGS.  */
		      tpd2.parms = XALLOCAVEC (int, nargs);
		      tpd2.level =
			TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (maintmpl));
		    }

		  /* Mark the template parameters.  But this time, we're
		     looking for the template parameters of the main
		     template, not in the specialization.  */
		  tpd2.current_arg = i;
		  tpd2.arg_uses_template_parms[i] = 0;
		  memset (tpd2.parms, 0, sizeof (int) * nargs);
		  for_each_template_parm (type,
					  &mark_template_parm,
					  &tpd2,
					  NULL,
					  /*include_nondeduced_p=*/false);

		  if (tpd2.arg_uses_template_parms [i])
		    {
		      /* The type depended on some template parameters.
			 If they are fully specialized in the
			 specialization, that's OK.  */
		      int j;
		      int count = 0;
		      for (j = 0; j < nargs; ++j)
			if (tpd2.parms[j] != 0
			    && tpd.arg_uses_template_parms [j])
			  ++count;
		      if (count != 0)
			error_n (input_location, count,
				 "type %qT of template argument %qE depends "
				 "on a template parameter",
				 "type %qT of template argument %qE depends "
				 "on template parameters",
				 type,
				 arg);
		    }
		}
	    }
	}
    }

  /* We should only get here once.  */
  if (TREE_CODE (decl) == TYPE_DECL)
    gcc_assert (!COMPLETE_TYPE_P (type));

  // Build the template decl.
  tree tmpl = build_template_decl (decl, current_template_parms,
				   DECL_MEMBER_TEMPLATE_P (maintmpl));
  SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
  DECL_TEMPLATE_INFO (tmpl) = build_template_info (maintmpl, specargs);
  DECL_PRIMARY_TEMPLATE (tmpl) = maintmpl;

  /* Give template template parms a DECL_CONTEXT of the template
     for which they are a parameter.  */
  for (i = 0; i < ntparms; ++i)
    {
      tree parm = TREE_VALUE (TREE_VEC_ELT (inner_parms, i));
      if (TREE_CODE (parm) == TEMPLATE_DECL)
	DECL_CONTEXT (parm) = tmpl;
    }

  if (VAR_P (decl))
    /* We didn't register this in check_explicit_specialization so we could
       wait until the constraints were set.  */
    decl = register_specialization (decl, maintmpl, specargs, false, 0);
  else
    associate_classtype_constraints (type);

  DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)
    = tree_cons (specargs, tmpl,
		 DECL_TEMPLATE_SPECIALIZATIONS (maintmpl));
  TREE_TYPE (DECL_TEMPLATE_SPECIALIZATIONS (maintmpl)) = type;

  /* Warn about existing implicit instantiations that this new partial
     specialization would have matched (ambiguity or too-late errors).  */
  for (inst = DECL_TEMPLATE_INSTANTIATIONS (maintmpl); inst;
       inst = TREE_CHAIN (inst))
    {
      tree instance = TREE_VALUE (inst);
      if (TYPE_P (instance)
	  ? (COMPLETE_TYPE_P (instance)
	     && CLASSTYPE_IMPLICIT_INSTANTIATION (instance))
	  : DECL_TEMPLATE_INSTANTIATION (instance))
	{
	  tree spec = most_specialized_partial_spec (instance, tf_none);
	  tree inst_decl = (DECL_P (instance)
			    ? instance : TYPE_NAME (instance));
	  if (!spec)
	    /* OK */;
	  else if (spec == error_mark_node)
	    permerror (input_location,
		       "declaration of %qD ambiguates earlier template "
		       "instantiation for %qD", decl, inst_decl);
	  else if (TREE_VALUE (spec) == tmpl)
	    permerror (input_location,
		       "partial specialization of %qD after instantiation "
		       "of %qD", decl, inst_decl);
	}
    }

  return decl;
}

/* PARM is a template parameter of some form; return the corresponding
   TEMPLATE_PARM_INDEX.  */

static tree
get_template_parm_index (tree parm)
{
  if (TREE_CODE (parm) == PARM_DECL
      || TREE_CODE (parm) == CONST_DECL)
    parm = DECL_INITIAL (parm);
  else if (TREE_CODE (parm) == TYPE_DECL
	   || TREE_CODE (parm) == TEMPLATE_DECL)
    parm = TREE_TYPE (parm);
  if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
      || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM
      || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM)
    parm = TEMPLATE_TYPE_PARM_INDEX (parm);
  gcc_assert (TREE_CODE (parm) == TEMPLATE_PARM_INDEX);
  return parm;
}

/* Subroutine of fixed_parameter_pack_p below.  Look for any template
   parameter packs used by the template parameter PARM.  */

static void
fixed_parameter_pack_p_1 (tree parm, struct find_parameter_pack_data *ppd)
{
  /* A type parm can't refer to another parm.
*/
  if (TREE_CODE (parm) == TYPE_DECL || parm == error_mark_node)
    return;
  else if (TREE_CODE (parm) == PARM_DECL)
    {
      /* A non-type parameter: scan its type for packs.  */
      cp_walk_tree (&TREE_TYPE (parm), &find_parameter_packs_r,
		    ppd, ppd->visited);
      return;
    }

  gcc_assert (TREE_CODE (parm) == TEMPLATE_DECL);

  /* A template template parameter: recurse into its own parameter
     list.  */
  tree vec = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (parm));
  for (int i = 0; i < TREE_VEC_LENGTH (vec); ++i)
    {
      tree p = TREE_VALUE (TREE_VEC_ELT (vec, i));
      if (template_parameter_pack_p (p))
	/* Any packs in the type are expanded by this parameter.  */;
      else
	fixed_parameter_pack_p_1 (p, ppd);
    }
}

/* PARM is a template parameter pack.  Return any parameter packs used in
   its type or the type of any of its template parameters.  If there are
   any such packs, it will be instantiated into a fixed template parameter
   list by partial instantiation rather than be fully deduced.  */

tree
fixed_parameter_pack_p (tree parm)
{
  /* This can only be true in a member template.  */
  if (TEMPLATE_PARM_ORIG_LEVEL (get_template_parm_index (parm)) < 2)
    return NULL_TREE;
  /* This can only be true for a parameter pack.  */
  if (!template_parameter_pack_p (parm))
    return NULL_TREE;
  /* A type parm can't refer to another parm.  */
  if (TREE_CODE (parm) == TYPE_DECL)
    return NULL_TREE;

  tree parameter_packs = NULL_TREE;
  struct find_parameter_pack_data ppd;
  ppd.parameter_packs = &parameter_packs;
  ppd.visited = new hash_set<tree>;
  ppd.type_pack_expansion_p = false;

  fixed_parameter_pack_p_1 (parm, &ppd);

  delete ppd.visited;
  return parameter_packs;
}

/* Check that a template declaration's use of default arguments and
   parameter packs is not invalid.  Here, PARMS are the template
   parameters.  IS_PRIMARY is true if DECL is the thing declared by
   a primary template.  IS_PARTIAL is true if DECL is a partial
   specialization.

   IS_FRIEND_DECL is nonzero if DECL is either a non-defining friend
   function template declaration or a friend class template
   declaration.  In the function case, 1 indicates a declaration, 2
   indicates a redeclaration.
   When IS_FRIEND_DECL=2, no errors are emitted for extraneous
   default arguments.

   Returns TRUE if there were no errors found, FALSE otherwise.  */

bool
check_default_tmpl_args (tree decl, tree parms, bool is_primary,
			 bool is_partial, int is_friend_decl)
{
  /* Diagnostic text to issue; doubles as a one-shot flag and is
     cleared after the first report (see msg = 0 below).  */
  const char *msg;
  int last_level_to_check;
  tree parm_level;
  bool no_errors = true;

  /* [temp.param]

     A default template-argument shall not be specified in a
     function template declaration or a function template
     definition, nor in the template-parameter-list of the
     definition of a member of a class template.  */

  if (TREE_CODE (CP_DECL_CONTEXT (decl)) == FUNCTION_DECL
      || (TREE_CODE (decl) == FUNCTION_DECL && DECL_LOCAL_DECL_P (decl)))
    /* You can't have a function template declaration in a local
       scope, nor you can you define a member of a class template in a
       local scope.  */
    return true;

  if ((TREE_CODE (decl) == TYPE_DECL
       && TREE_TYPE (decl)
       && LAMBDA_TYPE_P (TREE_TYPE (decl)))
      || (TREE_CODE (decl) == FUNCTION_DECL
	  && LAMBDA_FUNCTION_P (decl)))
    /* A lambda doesn't have an explicit declaration; don't complain
       about the parms of the enclosing class.  */
    return true;

  if (current_class_type
      && !TYPE_BEING_DEFINED (current_class_type)
      && DECL_LANG_SPECIFIC (decl)
      && DECL_DECLARES_FUNCTION_P (decl)
      /* If this is either a friend defined in the scope of the class
	 or a member function.  */
      && (DECL_FUNCTION_MEMBER_P (decl)
	  ? same_type_p (DECL_CONTEXT (decl), current_class_type)
	  : DECL_FRIEND_CONTEXT (decl)
	  ? same_type_p (DECL_FRIEND_CONTEXT (decl), current_class_type)
	  : false)
      /* And, if it was a member function, it really was defined in
	 the scope of the class.  */
      && (!DECL_FUNCTION_MEMBER_P (decl)
	  || DECL_INITIALIZED_IN_CLASS_P (decl)))
    /* We already checked these parameters when the template was
       declared, so there's no need to do it again now.  This function
       was defined in class scope, but we're processing its body now
       that the class is complete.  */
    return true;

  /* Core issue 226 (C++0x only): the following only applies to class
     templates.  */
  if (is_primary
      && ((cxx_dialect == cxx98) || TREE_CODE (decl) != FUNCTION_DECL))
    {
      /* [temp.param]

	 If a template-parameter has a default template-argument, all
	 subsequent template-parameters shall have a default
	 template-argument supplied.  */
      for (parm_level = parms; parm_level;
	   parm_level = TREE_CHAIN (parm_level))
	{
	  tree inner_parms = TREE_VALUE (parm_level);
	  int ntparms = TREE_VEC_LENGTH (inner_parms);
	  int seen_def_arg_p = 0;
	  int i;

	  for (i = 0; i < ntparms; ++i)
	    {
	      tree parm = TREE_VEC_ELT (inner_parms, i);

	      if (parm == error_mark_node)
		continue;

	      if (TREE_PURPOSE (parm))
		seen_def_arg_p = 1;
	      else if (seen_def_arg_p
		       && !template_parameter_pack_p (TREE_VALUE (parm)))
		{
		  error ("no default argument for %qD", TREE_VALUE (parm));
		  /* For better subsequent error-recovery, we indicate that
		     there should have been a default argument.  */
		  TREE_PURPOSE (parm) = error_mark_node;
		  no_errors = false;
		}
	      else if (!is_partial
		       && !is_friend_decl
		       /* Don't complain about an enclosing partial
			  specialization.  */
		       && parm_level == parms
		       && (TREE_CODE (decl) == TYPE_DECL || VAR_P (decl))
		       && i < ntparms - 1
		       && template_parameter_pack_p (TREE_VALUE (parm))
		       /* A fixed parameter pack will be partially
			  instantiated into a fixed length list.  */
		       && !fixed_parameter_pack_p (TREE_VALUE (parm)))
		{
		  /* A primary class template, primary variable template
		     (DR 2032), or alias template can only have one
		     parameter pack, at the end of the template
		     parameter list.  */

		  error ("parameter pack %q+D must be at the end of the"
			 " template parameter list", TREE_VALUE (parm));

		  TREE_VALUE (TREE_VEC_ELT (inner_parms, i))
		    = error_mark_node;
		  no_errors = false;
		}
	    }
	}
    }

  if (((cxx_dialect == cxx98) && TREE_CODE (decl) != TYPE_DECL)
      || is_partial
      || !is_primary
      || is_friend_decl)
    /* For an ordinary class template, default template arguments are
       allowed at the innermost level, e.g.:
	 template <class T = int>
	 struct S {};
       but, in a partial specialization, they're not allowed even
       there, as we have in [temp.class.spec]:

	 The template parameter list of a specialization shall not
	 contain default template argument values.

       So, for a partial specialization, or for a function template
       (in C++98/C++03), we look at all of them.  */
    ;
  else
    /* But, for a primary class template that is not a partial
       specialization we look at all template parameters except the
       innermost ones.  */
    parms = TREE_CHAIN (parms);

  /* Figure out what error message to issue.  */
  if (is_friend_decl == 2)
    msg = G_("default template arguments may not be used in function template "
	     "friend re-declaration");
  else if (is_friend_decl)
    msg = G_("default template arguments may not be used in template "
	     "friend declarations");
  else if (TREE_CODE (decl) == FUNCTION_DECL && (cxx_dialect == cxx98))
    msg = G_("default template arguments may not be used in function templates "
	     "without %<-std=c++11%> or %<-std=gnu++11%>");
  else if (is_partial)
    msg = G_("default template arguments may not be used in "
	     "partial specializations");
  else if (current_class_type && CLASSTYPE_IS_TEMPLATE (current_class_type))
    msg = G_("default argument for template parameter for class enclosing %qD");
  else
    /* Per [temp.param]/9, "A default template-argument shall not be
       specified in the template-parameter-lists of the definition of
       a member of a class template that appears outside of the
       member's class.", thus if we aren't handling a member of a
       class template there is no need to examine the parameters.  */
    return true;

  if (current_class_type && TYPE_BEING_DEFINED (current_class_type))
    /* If we're inside a class definition, there's no need to
       examine the parameters to the class itself.  On the one
       hand, they will be checked when the class is defined, and,
       on the other, default arguments are valid in things like:
	 template <class T = double>
	 struct S { template <class U> void f(U); };
       Here the default argument for `S' has no bearing on the
       declaration of `f'.  */
    last_level_to_check = template_class_depth (current_class_type) + 1;
  else
    /* Check everything.  */
    last_level_to_check = 0;

  for (parm_level = parms;
       parm_level && TMPL_PARMS_DEPTH (parm_level) >= last_level_to_check;
       parm_level = TREE_CHAIN (parm_level))
    {
      tree inner_parms = TREE_VALUE (parm_level);
      int i;
      int ntparms;

      ntparms = TREE_VEC_LENGTH (inner_parms);
      for (i = 0; i < ntparms; ++i)
	{
	  if (TREE_VEC_ELT (inner_parms, i) == error_mark_node)
	    continue;

	  if (TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i)))
	    {
	      if (msg)
		{
		  no_errors = false;
		  if (is_friend_decl == 2)
		    return no_errors;

		  error (msg, decl);
		  msg = 0;
		}

	      /* Clear out the default argument so that we are not
		 confused later.  */
	      TREE_PURPOSE (TREE_VEC_ELT (inner_parms, i)) = NULL_TREE;
	    }
	}

      /* At this point, if we're still interested in issuing messages,
	 they must apply to classes surrounding the object declared.  */
      if (msg)
	msg = G_("default argument for template parameter for class "
		 "enclosing %qD");
    }

  return no_errors;
}

/* Worker for push_template_decl_real, called via
   for_each_template_parm.  DATA is really an int, indicating the
   level of the parameters we are interested in.  If T is a template
   parameter of that level, return nonzero.  */

static int
template_parm_this_level_p (tree t, void* data)
{
  int this_level = *(int *)data;
  int level;

  /* Non-type parms are TEMPLATE_PARM_INDEXes; type and template
     template parms carry their level on the type itself.  */
  if (TREE_CODE (t) == TEMPLATE_PARM_INDEX)
    level = TEMPLATE_PARM_LEVEL (t);
  else
    level = TEMPLATE_TYPE_LEVEL (t);
  return level == this_level;
}

/* Worker for uses_outer_template_parms, called via
   for_each_template_parm.
   DATA is really an int, indicating the innermost outer level of
   parameters.  If T is a template parameter of that level or further
   out, return nonzero.  */

static int
template_parm_outer_level (tree t, void *data)
{
  int this_level = *(int *)data;
  int level;

  if (TREE_CODE (t) == TEMPLATE_PARM_INDEX)
    level = TEMPLATE_PARM_LEVEL (t);
  else
    level = TEMPLATE_TYPE_LEVEL (t);
  /* Match this level or any enclosing (outer) level.  */
  return level <= this_level;
}

/* Creates a TEMPLATE_DECL for the indicated DECL using the template
   parameters given by current_template_args, or reuses a
   previously existing one, if appropriate.  Returns the DECL, or an
   equivalent one, if it is replaced via a call to duplicate_decls.

   If IS_FRIEND is true, DECL is a friend declaration.  */

tree
push_template_decl (tree decl, bool is_friend)
{
  tree tmpl;
  tree args;
  tree info;
  tree ctx;
  bool is_primary;
  bool is_partial;
  /* Nonzero when we build a fresh TEMPLATE_DECL below rather than
     reusing an existing one.  */
  int new_template_p = 0;
  /* True if the template is a member template, in the sense of
     [temp.mem].  */
  bool member_template_p = false;

  if (decl == error_mark_node || !current_template_parms)
    return error_mark_node;

  /* See if this is a partial specialization.  */
  is_partial = ((DECL_IMPLICIT_TYPEDEF_P (decl)
		 && TREE_CODE (TREE_TYPE (decl)) != ENUMERAL_TYPE
		 && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (decl)))
		|| (VAR_P (decl)
		    && DECL_LANG_SPECIFIC (decl)
		    && DECL_TEMPLATE_SPECIALIZATION (decl)
		    && TINFO_USED_TEMPLATE_ID (DECL_TEMPLATE_INFO (decl))));

  /* No surprising friend functions.  */
  gcc_checking_assert (is_friend
		       || !(TREE_CODE (decl) == FUNCTION_DECL
			    && DECL_UNIQUE_FRIEND_P (decl)));

  if (is_friend)
    /* For a friend, we want the context of the friend, not
       the type of which it is a friend.  */
    ctx = CP_DECL_CONTEXT (decl);
  else if (CP_DECL_CONTEXT (decl)
	   && TREE_CODE (CP_DECL_CONTEXT (decl)) != NAMESPACE_DECL)
    /* In the case of a virtual function, we want the class in which
       it is defined.  */
    ctx = CP_DECL_CONTEXT (decl);
  else
    /* Otherwise, if we're currently defining some class, the DECL
       is assumed to be a member of the class.  */
    ctx = current_scope ();

  /* A namespace context is treated as no class context at all.  */
  if (ctx && TREE_CODE (ctx) == NAMESPACE_DECL)
    ctx = NULL_TREE;

  if (!DECL_CONTEXT (decl))
    DECL_CONTEXT (decl) = FROB_CONTEXT (current_namespace);

  /* See if this is a primary template.  */
  if (is_friend && ctx
      && uses_template_parms_level (ctx, processing_template_decl))
    /* A friend template that specifies a class context, i.e.
         template <typename T> friend void A<T>::f();
       is not primary.  */
    is_primary = false;
  else if (TREE_CODE (decl) == TYPE_DECL
	   && LAMBDA_TYPE_P (TREE_TYPE (decl)))
    is_primary = false;
  else
    is_primary = template_parm_scope_p ();

  if (is_primary)
    {
      warning (OPT_Wtemplates, "template %qD declared", decl);

      if (DECL_CLASS_SCOPE_P (decl))
	member_template_p = true;

      if (TREE_CODE (decl) == TYPE_DECL
	  && IDENTIFIER_ANON_P (DECL_NAME (decl)))
	{
	  error ("template class without a name");
	  return error_mark_node;
	}
      else if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  if (member_template_p)
	    {
	      if (DECL_OVERRIDE_P (decl) || DECL_FINAL_P (decl))
		error ("member template %qD may not have virt-specifiers",
		       decl);
	    }
	  if (DECL_DESTRUCTOR_P (decl))
	    {
	      /* [temp.mem]

		 A destructor shall not be a member template.  */
	      error_at (DECL_SOURCE_LOCATION (decl),
			"destructor %qD declared as member template", decl);
	      return error_mark_node;
	    }
	  if (IDENTIFIER_NEWDEL_OP_P (DECL_NAME (decl))
	      && (!prototype_p (TREE_TYPE (decl))
		  || TYPE_ARG_TYPES (TREE_TYPE (decl)) == void_list_node
		  || !TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl)))
		  || (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (decl)))
		      == void_list_node)))
	    {
	      /* [basic.stc.dynamic.allocation]

		 An allocation function can be a function
		 template. ... Template allocation functions shall
		 have two or more parameters.  */
	      error ("invalid template declaration of %qD", decl);
	      return error_mark_node;
	    }
	}
      else if (DECL_IMPLICIT_TYPEDEF_P (decl)
	       && CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  /* Class template, set TEMPLATE_TYPE_PARM_FOR_CLASS.  */
	  tree parms = INNERMOST_TEMPLATE_PARMS (current_template_parms);
	  for (int i = 0; i < TREE_VEC_LENGTH (parms); ++i)
	    {
	      tree t = TREE_VALUE (TREE_VEC_ELT (parms, i));
	      if (TREE_CODE (t) == TYPE_DECL)
		t = TREE_TYPE (t);
	      if (TREE_CODE (t) == TEMPLATE_TYPE_PARM)
		TEMPLATE_TYPE_PARM_FOR_CLASS (t) = true;
	    }
	}
      else if (TREE_CODE (decl) == TYPE_DECL
	       && TYPE_DECL_ALIAS_P (decl))
	/* alias-declaration */
	gcc_assert (!DECL_ARTIFICIAL (decl));
      else if (VAR_P (decl))
	/* C++14 variable template. */;
      else if (TREE_CODE (decl) == CONCEPT_DECL)
	/* C++20 concept definitions.  */;
      else
	{
	  error ("template declaration of %q#D", decl);
	  return error_mark_node;
	}
    }

  /* Check to see that the rules regarding the use of default
     arguments are not being violated.  We check args for a friend
     functions when we know whether it's a definition, introducing
     declaration or re-declaration.  */
  if (!is_friend || TREE_CODE (decl) != FUNCTION_DECL)
    check_default_tmpl_args (decl, current_template_parms,
			     is_primary, is_partial, is_friend);

  /* Ensure that there are no parameter packs in the type of this
     declaration that have not been expanded.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Check each of the arguments individually to see if there are
	 any bare parameter packs.  */
      tree type = TREE_TYPE (decl);
      tree arg = DECL_ARGUMENTS (decl);
      tree argtype = TYPE_ARG_TYPES (type);

      while (arg && argtype)
	{
	  if (!DECL_PACK_P (arg)
	      && check_for_bare_parameter_packs (TREE_TYPE (arg)))
	    {
	      /* This is a PARM_DECL that contains unexpanded parameter
		 packs. We have already complained about this in the
		 check_for_bare_parameter_packs call, so just replace
		 these types with ERROR_MARK_NODE.  */
	      TREE_TYPE (arg) = error_mark_node;
	      TREE_VALUE (argtype) = error_mark_node;
	    }

	  arg = DECL_CHAIN (arg);
	  argtype = TREE_CHAIN (argtype);
	}

      /* Check for bare parameter packs in the return type and the
	 exception specifiers.  */
      if (check_for_bare_parameter_packs (TREE_TYPE (type)))
	/* Errors were already issued, set return type to int
	   as the frontend doesn't expect error_mark_node as
	   the return type.  */
	TREE_TYPE (type) = integer_type_node;
      if (check_for_bare_parameter_packs (TYPE_RAISES_EXCEPTIONS (type)))
	TYPE_RAISES_EXCEPTIONS (type) = NULL_TREE;
    }
  else if (check_for_bare_parameter_packs (is_typedef_decl (decl)
					   ? DECL_ORIGINAL_TYPE (decl)
					   : TREE_TYPE (decl)))
    {
      TREE_TYPE (decl) = error_mark_node;
      return error_mark_node;
    }

  if (is_partial)
    return process_partial_specialization (decl);

  args = current_template_args ();

  if (!ctx
      || TREE_CODE (ctx) == FUNCTION_DECL
      || (CLASS_TYPE_P (ctx) && TYPE_BEING_DEFINED (ctx))
      || (TREE_CODE (decl) == TYPE_DECL && LAMBDA_TYPE_P (TREE_TYPE (decl)))
      || (is_friend && !(DECL_LANG_SPECIFIC (decl)
			 && DECL_TEMPLATE_INFO (decl))))
    {
      if (DECL_LANG_SPECIFIC (decl)
	  && DECL_TEMPLATE_INFO (decl)
	  && DECL_TI_TEMPLATE (decl))
	tmpl = DECL_TI_TEMPLATE (decl);
      /* If DECL is a TYPE_DECL for a class-template, then there won't
	 be DECL_LANG_SPECIFIC.  The information equivalent to
	 DECL_TEMPLATE_INFO is found in TYPE_TEMPLATE_INFO instead.  */
      else if (DECL_IMPLICIT_TYPEDEF_P (decl)
	       && TYPE_TEMPLATE_INFO (TREE_TYPE (decl))
	       && TYPE_TI_TEMPLATE (TREE_TYPE (decl)))
	{
	  /* Since a template declaration already existed for this
	     class-type, we must be redeclaring it here.  Make sure
	     that the redeclaration is valid.  */
	  redeclare_class_template (TREE_TYPE (decl),
				    current_template_parms,
				    current_template_constraints ());
	  /* We don't need to create a new TEMPLATE_DECL; just use the
	     one we already had.  */
	  tmpl = TYPE_TI_TEMPLATE (TREE_TYPE (decl));
	}
      else
	{
	  tmpl = build_template_decl (decl, current_template_parms,
				      member_template_p);
	  new_template_p = 1;

	  if (DECL_LANG_SPECIFIC (decl)
	      && DECL_TEMPLATE_SPECIALIZATION (decl))
	    {
	      /* A specialization of a member template of a template
		 class.  */
	      SET_DECL_TEMPLATE_SPECIALIZATION (tmpl);
	      DECL_TEMPLATE_INFO (tmpl) = DECL_TEMPLATE_INFO (decl);
	      DECL_TEMPLATE_INFO (decl) = NULL_TREE;
	    }
	}
    }
  else
    {
      tree a, t, current, parms;
      int i;
      tree tinfo = get_template_info (decl);

      if (!tinfo)
	{
	  error ("template definition of non-template %q#D", decl);
	  return error_mark_node;
	}

      tmpl = TI_TEMPLATE (tinfo);

      if (DECL_FUNCTION_TEMPLATE_P (tmpl)
	  && DECL_TEMPLATE_INFO (decl) && DECL_TI_ARGS (decl)
	  && DECL_TEMPLATE_SPECIALIZATION (decl)
	  && DECL_MEMBER_TEMPLATE_P (tmpl))
	{
	  tree new_tmpl;

	  /* The declaration is a specialization of a member
	     template, declared outside the class.  Therefore, the
	     innermost template arguments will be NULL, so we
	     replace them with the arguments determined by the
	     earlier call to check_explicit_specialization.  */
	  args = DECL_TI_ARGS (decl);

	  new_tmpl
	    = build_template_decl (decl, current_template_parms,
				   member_template_p);
	  DECL_TI_TEMPLATE (decl) = new_tmpl;
	  SET_DECL_TEMPLATE_SPECIALIZATION (new_tmpl);
	  DECL_TEMPLATE_INFO (new_tmpl)
	    = build_template_info (tmpl, args);

	  register_specialization (new_tmpl,
				   most_general_template (tmpl),
				   args,
				   is_friend, 0);
	  return decl;
	}

      /* Make sure the template headers we got make sense.  */

      parms = DECL_TEMPLATE_PARMS (tmpl);
      i = TMPL_PARMS_DEPTH (parms);
      if (TMPL_ARGS_DEPTH (args) != i)
	{
	  error ("expected %d levels of template parms for %q#D, got %d",
		 i, decl, TMPL_ARGS_DEPTH (args));
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  return error_mark_node;
	}
      else
	/* Walk outward one level of scope per parameter level,
	   checking each level's parameter count.  */
	for (current = decl; i > 0; --i, parms = TREE_CHAIN (parms))
	  {
	    a = TMPL_ARGS_LEVEL (args, i);
	    t = INNERMOST_TEMPLATE_PARMS (parms);

	    if (TREE_VEC_LENGTH (t) != TREE_VEC_LENGTH (a))
	      {
		if (current == decl)
		  error ("got %d template parameters for %q#D",
			 TREE_VEC_LENGTH (a), decl);
		else
		  error ("got %d template parameters for %q#T",
			 TREE_VEC_LENGTH (a), current);
		error (" but %d required", TREE_VEC_LENGTH (t));
		/* Avoid crash in import_export_decl.  */
		DECL_INTERFACE_KNOWN (decl) = 1;
		return error_mark_node;
	      }

	    if (current == decl)
	      current = ctx;
	    else if (current == NULL_TREE)
	      /* Can happen in erroneous input.  */
	      break;
	    else
	      current = get_containing_scope (current);
	  }

      /* Check that the parms are used in the appropriate qualifying scopes
	 in the declarator.  */
      if (!comp_template_args
	  (TI_ARGS (tinfo),
	   TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl)))))
	{
	  error ("template arguments to %qD do not match original "
		 "template %qD", decl, DECL_TEMPLATE_RESULT (tmpl));
	  if (!uses_template_parms (TI_ARGS (tinfo)))
	    inform (input_location, "use %<template<>%> for"
		    " an explicit specialization");
	  /* Avoid crash in import_export_decl.  */
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  return error_mark_node;
	}
    }

  gcc_checking_assert (DECL_TEMPLATE_RESULT (tmpl) == decl);

  if (new_template_p)
    {
      /* Push template declarations for global functions and types.
	 Note that we do not try to push a global template friend
	 declared in a template class; such a thing may well depend on
	 the template parameters of the class and we'll push it when
	 instantiating the befriending class.  */
      if (!ctx
	  && !(is_friend && template_class_depth (current_class_type) > 0))
	{
	  tmpl = pushdecl_namespace_level (tmpl, /*hiding=*/is_friend);
	  if (tmpl == error_mark_node)
	    return error_mark_node;
	}
    }
  else
    /* The type may have been completed, or (erroneously) changed.  */
    TREE_TYPE (tmpl) = TREE_TYPE (decl);

  if (is_primary)
    {
      tree parms = DECL_TEMPLATE_PARMS (tmpl);

      DECL_PRIMARY_TEMPLATE (tmpl) = tmpl;

      /* Give template template parms a DECL_CONTEXT of the template
	 for which they are a parameter.  */
      parms = INNERMOST_TEMPLATE_PARMS (parms);
      for (int i = TREE_VEC_LENGTH (parms) - 1; i >= 0; --i)
	{
	  tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i));
	  if (TREE_CODE (parm) == TEMPLATE_DECL)
	    DECL_CONTEXT (parm) = tmpl;
	}

      if (TREE_CODE (decl) == TYPE_DECL
	  && TYPE_DECL_ALIAS_P (decl))
	{
	  if (tree constr
	      = TEMPLATE_PARMS_CONSTRAINTS (DECL_TEMPLATE_PARMS (tmpl)))
	    {
	      /* ??? Why don't we do this here for all templates?  */
	      constr = build_constraints (constr, NULL_TREE);
	      set_constraints (decl, constr);
	    }
	  if (complex_alias_template_p (tmpl))
	    TEMPLATE_DECL_COMPLEX_ALIAS_P (tmpl) = true;
	}
    }

  /* The DECL_TI_ARGS of DECL contains full set of arguments referring
     back to its most general template.  If TMPL is a specialization,
     ARGS may only have the innermost set of arguments.  Add the missing
     argument levels if necessary.  */
  if (DECL_TEMPLATE_INFO (tmpl))
    args = add_outermost_template_args (DECL_TI_ARGS (tmpl), args);

  info = build_template_info (tmpl, args);

  if (DECL_IMPLICIT_TYPEDEF_P (decl))
    SET_TYPE_TEMPLATE_INFO (TREE_TYPE (tmpl), info);
  else
    {
      if (is_primary)
	retrofit_lang_decl (decl);
      if (DECL_LANG_SPECIFIC (decl)
	  && !(VAR_OR_FUNCTION_DECL_P (decl)
	       && DECL_LOCAL_DECL_P (decl)))
	DECL_TEMPLATE_INFO (decl) = info;
    }

  if (flag_implicit_templates
      && !is_friend
      && TREE_PUBLIC (decl)
      && VAR_OR_FUNCTION_DECL_P (decl))
    /* Set DECL_COMDAT on template instantiations; if we force
       them to be emitted by explicit instantiation,
       mark_needed will tell cgraph to do the right thing.  */
    DECL_COMDAT (decl) = true;

  return DECL_TEMPLATE_RESULT (tmpl);
}

/* FN is an inheriting constructor that inherits from the constructor
   template INHERITED; turn FN into a constructor template with a matching
   template header.
*/

tree
add_inherited_template_parms (tree fn, tree inherited)
{
  /* Clone the inherited constructor's innermost parameter level and
     graft it onto the current parameter list.  */
  tree inner_parms
    = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (inherited));
  inner_parms = copy_node (inner_parms);
  tree parms
    = tree_cons (size_int (processing_template_decl + 1),
		 inner_parms, current_template_parms);
  tree tmpl = build_template_decl (fn, parms, /*member*/true);
  tree args = template_parms_to_args (parms);
  DECL_TEMPLATE_INFO (fn) = build_template_info (tmpl, args);
  DECL_ARTIFICIAL (tmpl) = true;
  DECL_PRIMARY_TEMPLATE (tmpl) = tmpl;
  return tmpl;
}

/* Called when a class template TYPE is redeclared with the indicated
   template PARMS, e.g.:

      template <class T> struct S;
      template <class T> struct S {};  */

bool
redeclare_class_template (tree type, tree parms, tree cons)
{
  tree tmpl;
  tree tmpl_parms;
  int i;

  if (!TYPE_TEMPLATE_INFO (type))
    {
      error ("%qT is not a template type", type);
      return false;
    }

  tmpl = TYPE_TI_TEMPLATE (type);
  if (!PRIMARY_TEMPLATE_P (tmpl))
    /* The type is nested in some template class.  Nothing to worry
       about here; there are no new template parameters for the nested
       type.  */
    return true;

  if (!parms)
    {
      error ("template specifiers not specified in declaration of %qD",
	     tmpl);
      return false;
    }

  parms = INNERMOST_TEMPLATE_PARMS (parms);
  tmpl_parms = DECL_INNERMOST_TEMPLATE_PARMS (tmpl);

  if (TREE_VEC_LENGTH (parms) != TREE_VEC_LENGTH (tmpl_parms))
    {
      error_n (input_location, TREE_VEC_LENGTH (parms),
	       "redeclared with %d template parameter",
	       "redeclared with %d template parameters",
	       TREE_VEC_LENGTH (parms));
      inform_n (DECL_SOURCE_LOCATION (tmpl), TREE_VEC_LENGTH (tmpl_parms),
		"previous declaration %qD used %d template parameter",
		"previous declaration %qD used %d template parameters",
		tmpl, TREE_VEC_LENGTH (tmpl_parms));
      return false;
    }

  /* Compare the two parameter lists element-wise.  */
  for (i = 0; i < TREE_VEC_LENGTH (tmpl_parms); ++i)
    {
      tree tmpl_parm;
      tree parm;
      tree tmpl_default;
      tree parm_default;

      if (TREE_VEC_ELT (tmpl_parms, i) == error_mark_node
	  || TREE_VEC_ELT (parms, i) == error_mark_node)
	continue;

      tmpl_parm = TREE_VALUE (TREE_VEC_ELT (tmpl_parms, i));
      if (error_operand_p (tmpl_parm))
	return false;

      parm = TREE_VALUE (TREE_VEC_ELT (parms, i));
      tmpl_default = TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i));
      parm_default = TREE_PURPOSE (TREE_VEC_ELT (parms, i));

      /* TMPL_PARM and PARM can be either TYPE_DECL, PARM_DECL, or
	 TEMPLATE_DECL.  */
      if (TREE_CODE (tmpl_parm) != TREE_CODE (parm)
	  || (TREE_CODE (tmpl_parm) != TYPE_DECL
	      && !same_type_p (TREE_TYPE (tmpl_parm), TREE_TYPE (parm)))
	  || (TREE_CODE (tmpl_parm) != PARM_DECL
	      && (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (tmpl_parm))
		  != TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm))))
	  || (TREE_CODE (tmpl_parm) == PARM_DECL
	      && (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (tmpl_parm))
		  != TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))))
	{
	  auto_diagnostic_group d;
	  error ("template parameter %q+#D", tmpl_parm);
	  inform (input_location, "redeclared here as %q#D", parm);
	  return false;
	}

      /* The parameters can be declared to introduce different
	 constraints.  */
      tree p1 = TREE_VEC_ELT (tmpl_parms, i);
      tree p2 = TREE_VEC_ELT (parms, i);
      if (!template_parameter_constraints_equivalent_p (p1, p2))
	{
	  auto_diagnostic_group d;
	  error ("declaration of template parameter %q+#D with different "
		 "constraints", parm);
	  inform (DECL_SOURCE_LOCATION (tmpl_parm),
		  "original declaration appeared here");
	  return false;
	}

      if (tmpl_default != NULL_TREE && parm_default != NULL_TREE)
	{
	  /* We have in [temp.param]:

	     A template-parameter may not be given default arguments
	     by two different declarations in the same scope.  */
	  auto_diagnostic_group d;
	  error_at (input_location,
		    "redefinition of default argument for %q#D", parm);
	  inform (DECL_SOURCE_LOCATION (tmpl_parm),
		  "original definition appeared here");
	  return false;
	}

      if (parm_default != NULL_TREE)
	/* Update the previous template parameters (which are the ones
	   that will really count) with the new default value.  */
	TREE_PURPOSE (TREE_VEC_ELT (tmpl_parms, i)) = parm_default;
      else if (tmpl_default != NULL_TREE)
	/* Update the new parameters, too; they'll be used as the
	   parameters for any members.  */
	TREE_PURPOSE (TREE_VEC_ELT (parms, i)) = tmpl_default;

      /* Give each template template parm in this redeclaration a
	 DECL_CONTEXT of the template for which they are a parameter.  */
      if (TREE_CODE (parm) == TEMPLATE_DECL)
	{
	  gcc_assert (DECL_CONTEXT (parm) == NULL_TREE);
	  DECL_CONTEXT (parm) = tmpl;
	}

      if (TREE_CODE (parm) == TYPE_DECL)
	TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (parm)) = true;
    }

  tree ci = get_constraints (tmpl);
  tree req1 = ci ? CI_TEMPLATE_REQS (ci) : NULL_TREE;
  tree req2 = cons ? CI_TEMPLATE_REQS (cons) : NULL_TREE;

  /* Two classes with different constraints declare different entities.  */
  if (!cp_tree_equal (req1, req2))
    {
      auto_diagnostic_group d;
      error_at (input_location, "redeclaration %q#D with different "
				"constraints", tmpl);
      inform (DECL_SOURCE_LOCATION (tmpl),
	      "original declaration appeared here");
      return false;
    }

    return true;
}

/* The actual substitution part of instantiate_non_dependent_expr_sfinae,
   to be used when the caller has already checked
    (processing_template_decl
     && !instantiation_dependent_expression_p (expr)
     && potential_constant_expression (expr))
   and cleared processing_template_decl.  */

tree
instantiate_non_dependent_expr_internal (tree expr, tsubst_flags_t complain)
{
  return tsubst_copy_and_build (expr,
				/*args=*/NULL_TREE,
				complain,
				/*in_decl=*/NULL_TREE,
				/*function_p=*/false,
				/*integral_constant_expression_p=*/true);
}

/* Simplify EXPR if it is a non-dependent expression.  Returns the
   (possibly simplified) expression.  */

tree
instantiate_non_dependent_expr_sfinae (tree expr, tsubst_flags_t complain)
{
  if (expr == NULL_TREE)
    return NULL_TREE;

  /* If we're in a template, but EXPR isn't value dependent, simplify
     it.  We're supposed to treat:

       template <typename T> void f(T[1 + 1]);
       template <typename T> void f(T[2]);

     as two declarations of the same function, for example.  */
  if (processing_template_decl
      && is_nondependent_constant_expression (expr))
    {
      processing_template_decl_sentinel s;
      expr = instantiate_non_dependent_expr_internal (expr, complain);
    }
  return expr;
}

/* Convenience wrapper over the _sfinae variant with hard errors.  */

tree
instantiate_non_dependent_expr (tree expr)
{
  return instantiate_non_dependent_expr_sfinae (expr, tf_error);
}

/* Like instantiate_non_dependent_expr, but return NULL_TREE rather than
   an uninstantiated expression.
*/

tree
instantiate_non_dependent_or_null (tree expr)
{
  if (expr == NULL_TREE)
    return NULL_TREE;
  if (processing_template_decl)
    {
      /* Dependent expressions are mapped to NULL_TREE instead of
	 being returned as-is.  */
      if (!is_nondependent_constant_expression (expr))
	expr = NULL_TREE;
      else
	{
	  processing_template_decl_sentinel s;
	  expr = instantiate_non_dependent_expr_internal (expr, tf_error);
	}
    }
  return expr;
}

/* True iff T is a specialization of a variable template.  */

bool
variable_template_specialization_p (tree t)
{
  if (!VAR_P (t) || !DECL_LANG_SPECIFIC (t) || !DECL_TEMPLATE_INFO (t))
    return false;
  tree tmpl = DECL_TI_TEMPLATE (t);
  return variable_template_p (tmpl);
}

/* Return TRUE iff T is a type alias, a TEMPLATE_DECL for an alias
   template declaration, or a TYPE_DECL for an alias declaration.  */

bool
alias_type_or_template_p (tree t)
{
  if (t == NULL_TREE)
    return false;
  return ((TREE_CODE (t) == TYPE_DECL && TYPE_DECL_ALIAS_P (t))
	  || (TYPE_P (t)
	      && TYPE_NAME (t)
	      && TYPE_DECL_ALIAS_P (TYPE_NAME (t)))
	  || DECL_ALIAS_TEMPLATE_P (t));
}

/* If T is a specialization of an alias template, return it; otherwise
   return NULL_TREE.  If TRANSPARENT_TYPEDEFS is true, look through
   other aliases.  */

tree
alias_template_specialization_p (const_tree t,
				 bool transparent_typedefs)
{
  if (!TYPE_P (t))
    return NULL_TREE;

  /* It's an alias template specialization if it's an alias and its
     TYPE_NAME is a specialization of a primary template.  */
  if (typedef_variant_p (t))
    {
      if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t))
	if (PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo)))
	  return CONST_CAST_TREE (t);
      if (transparent_typedefs)
	return alias_template_specialization_p (DECL_ORIGINAL_TYPE
						(TYPE_NAME (t)),
						transparent_typedefs);
    }

  return NULL_TREE;
}

/* An alias template is complex from a SFINAE perspective if a template-id
   using that alias can be ill-formed when the expansion is not, as with
   the void_t template.  We determine this by checking whether the
   expansion for the alias template uses all its template parameters.  */

struct uses_all_template_parms_data
{
  /* The template parameter level we are tracking.  */
  int level;
  /* One flag per innermost parameter: set when that parameter is
     seen in the pattern.  */
  bool *seen;
};

/* for_each_template_parm callback: mark parameters of DATA->level
   as seen.  Always returns 0 so the walk continues.  */

static int
uses_all_template_parms_r (tree t, void *data_)
{
  struct uses_all_template_parms_data &data
    = *(struct uses_all_template_parms_data*)data_;
  tree idx = get_template_parm_index (t);

  if (TEMPLATE_PARM_LEVEL (idx) == data.level)
    data.seen[TEMPLATE_PARM_IDX (idx)] = true;
  return 0;
}

/* for_each_template_parm any_fn callback for complex_alias_template_p.  */

static int
complex_pack_expansion_r (tree t, void *data_)
{
  /* An alias template with a pack expansion that expands a pack from the
     enclosing class needs to be considered complex, to avoid confusion with
     the same pack being used as an argument to the alias's own template
     parameter (91966).  */
  if (!PACK_EXPANSION_P (t))
    return 0;
  struct uses_all_template_parms_data &data
    = *(struct uses_all_template_parms_data*)data_;
  for (tree pack = PACK_EXPANSION_PARAMETER_PACKS (t); pack;
       pack = TREE_CHAIN (pack))
    {
      tree parm_pack = TREE_VALUE (pack);
      if (!TEMPLATE_PARM_P (parm_pack))
	continue;
      int idx, level;
      template_parm_level_and_index (parm_pack, &level, &idx);
      /* A pack from an enclosing (outer) level makes the alias
	 complex.  */
      if (level < data.level)
	return 1;
    }
  return 0;
}

static bool
complex_alias_template_p (const_tree tmpl)
{
  /* A renaming alias isn't complex.  */
  if (get_underlying_template (CONST_CAST_TREE (tmpl)) != tmpl)
    return false;

  /* Any other constrained alias is complex.  */
  if (get_constraints (tmpl))
    return true;

  struct uses_all_template_parms_data data;
  tree pat = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
  tree parms = DECL_TEMPLATE_PARMS (tmpl);
  data.level = TMPL_PARMS_DEPTH (parms);
  int len = TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS (parms));
  data.seen = XALLOCAVEC (bool, len);
  for (int i = 0; i < len; ++i)
    data.seen[i] = false;

  if (for_each_template_parm (pat, uses_all_template_parms_r, &data,
			      NULL, true, complex_pack_expansion_r))
    return true;
  /* Complex if any innermost parameter went unused in the pattern.  */
  for (int i = 0; i < len; ++i)
    if (!data.seen[i])
      return true;
  return false;
}

/* If T is a specialization of a complex alias template with dependent
   template-arguments, return it; otherwise return NULL_TREE.  If T is a
   typedef to such a specialization, return the specialization.  */

tree
dependent_alias_template_spec_p (const_tree t, bool transparent_typedefs)
{
  if (!TYPE_P (t) || !typedef_variant_p (t))
    return NULL_TREE;

  tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t);
  if (tinfo
      && TEMPLATE_DECL_COMPLEX_ALIAS_P (TI_TEMPLATE (tinfo))
      && (any_dependent_template_arguments_p
	  (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)))))
    return CONST_CAST_TREE (t);

  if (transparent_typedefs)
    {
      tree utype = DECL_ORIGINAL_TYPE (TYPE_NAME (t));
      return dependent_alias_template_spec_p (utype, transparent_typedefs);
    }

  return NULL_TREE;
}

/* Return the number of innermost template parameters in TMPL.  */

static int
num_innermost_template_parms (const_tree tmpl)
{
  tree parms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl));
  return TREE_VEC_LENGTH (parms);
}

/* Return either TMPL or another template that it is equivalent to under DR
   1286: An alias that just changes the name of a template is equivalent to
   the other template.  */

static tree
get_underlying_template (tree tmpl)
{
  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);
  while (DECL_ALIAS_TEMPLATE_P (tmpl))
    {
      /* Determine if the alias is equivalent to an underlying
	 template.  */
      tree orig_type = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
      /* The underlying type may have been ill-formed. Don't proceed.  */
      if (!orig_type)
	break;
      tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (orig_type);
      if (!tinfo)
	break;

      tree underlying = TI_TEMPLATE (tinfo);
      if (!PRIMARY_TEMPLATE_P (underlying)
	  || (num_innermost_template_parms (tmpl)
	      != num_innermost_template_parms (underlying)))
	break;

      /* The alias's generic arguments must match the underlying
	 specialization's arguments exactly.  */
      tree alias_args = INNERMOST_TEMPLATE_ARGS (generic_targs_for (tmpl));
      if (!comp_template_args (TI_ARGS (tinfo), alias_args))
	break;

      /* If TMPL adds or changes any constraints, it isn't equivalent.  I think
	 it's appropriate to treat a less-constrained alias as equivalent.  */
      if (!at_least_as_constrained (underlying, tmpl))
	break;

      /* Alias is equivalent.  Strip it and repeat.  */
      tmpl = underlying;
    }

  return tmpl;
}

/* Subroutine of convert_nontype_argument. Converts EXPR to TYPE, which
   must be a reference-to-function or a pointer-to-function type, as specified
   in [temp.arg.nontype]: disambiguate EXPR if it is an overload set,
   and check that the resulting function has external linkage.  */

static tree
convert_nontype_argument_function (tree type, tree expr,
				   tsubst_flags_t complain)
{
  tree fns = expr;
  tree fn, fn_no_ptr;
  linkage_kind linkage;

  fn = instantiate_type (type, fns, tf_none);
  if (fn == error_mark_node)
    return error_mark_node;

  if (value_dependent_expression_p (fn))
    goto accept;

  /* Strip conversions, address-of, and BASELINK wrapping to get at
     the underlying FUNCTION_DECL.  */
  fn_no_ptr = strip_fnptr_conv (fn);
  if (TREE_CODE (fn_no_ptr) == ADDR_EXPR)
    fn_no_ptr = TREE_OPERAND (fn_no_ptr, 0);
  if (BASELINK_P (fn_no_ptr))
    fn_no_ptr = BASELINK_FUNCTIONS (fn_no_ptr);

  /* [temp.arg.nontype]/1

     A template-argument for a non-type, non-template template-parameter
     shall be one of:
     [...]

     -- the address of an object or function with external [C++11: or
	internal] linkage.  */

  STRIP_ANY_LOCATION_WRAPPER (fn_no_ptr);
  if (TREE_CODE (fn_no_ptr) != FUNCTION_DECL)
    {
      if (complain & tf_error)
	{
	  location_t loc = cp_expr_loc_or_input_loc (expr);
	  error_at (loc, "%qE is not a valid template argument for type %qT",
		    expr, type);
	  if (TYPE_PTR_P (type))
	    inform (loc, "it must be the address of a function "
		    "with external linkage");
	  else
	    inform (loc, "it must be the name of a function with "
		    "external linkage");
	}
      return NULL_TREE;
    }

  linkage = decl_linkage (fn_no_ptr);
  /* In C++11 and later only no-linkage functions are rejected; before
     that, anything without external linkage is.  */
  if (cxx_dialect >= cxx11 ? linkage == lk_none : linkage != lk_external)
    {
      if (complain & tf_error)
	{
	  location_t loc = cp_expr_loc_or_input_loc (expr);
	  if (cxx_dialect >= cxx11)
	    error_at (loc, "%qE is not a valid template argument for type "
		      "%qT because %qD has no linkage",
		      expr, type, fn_no_ptr);
	  else
	    error_at (loc, "%qE is not a valid template argument for type "
		      "%qT because %qD does not have external linkage",
		      expr, type, fn_no_ptr);
	}
      return NULL_TREE;
    }

 accept:
  if (TYPE_REF_P (type))
    {
      if (REFERENCE_REF_P (fn))
	fn = TREE_OPERAND (fn, 0);
      else
	fn = build_address (fn);
    }
  if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (fn)))
    fn = build_nop (type, fn);

  return fn;
}

/* Subroutine of convert_nontype_argument.
   Check if EXPR of type TYPE is a valid pointer-to-member constant.
   Emit an error otherwise.
*/

static bool
check_valid_ptrmem_cst_expr (tree type, tree expr,
			     tsubst_flags_t complain)
{
  tree orig_expr = expr;
  STRIP_NOPS (expr);
  /* A null pointer constant is always a valid pointer-to-member value.  */
  if (null_ptr_cst_p (expr))
    return true;
  /* A pointer-to-member constant of the matching class type is valid.  */
  if (TREE_CODE (expr) == PTRMEM_CST
      && same_type_p (TYPE_PTRMEM_CLASS_TYPE (type),
		      PTRMEM_CST_CLASS (expr)))
    return true;
  /* C++11 also accepts any constant expression with a null
     pointer-to-member value.  */
  if (cxx_dialect >= cxx11 && null_member_pointer_value_p (expr))
    return true;
  /* Inside a template, &X::Y may still be an unresolved OFFSET_REF;
     accept it and let instantiation check the real value.  */
  if (processing_template_decl
      && TREE_CODE (expr) == ADDR_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == OFFSET_REF)
    return true;
  if (complain & tf_error)
    {
      location_t loc = cp_expr_loc_or_input_loc (orig_expr);
      error_at (loc, "%qE is not a valid template argument for type %qT",
		orig_expr, type);
      if (TREE_CODE (expr) != PTRMEM_CST)
	inform (loc, "it must be a pointer-to-member of the form %<&X::Y%>");
      else
	/* It is a PTRMEM_CST, so the class type must have mismatched.  */
	inform (loc, "because it is a member of %qT", PTRMEM_CST_CLASS (expr));
    }
  return false;
}

/* Returns TRUE iff the address of OP is value-dependent.

   14.6.2.4 [temp.dep.temp]:
   A non-integral non-type template-argument is dependent if its type is
   dependent or it has either of the following forms
     qualified-id
     & qualified-id
   and contains a nested-name-specifier which specifies a class-name that
   names a dependent type.

   We generalize this to just say that the address of a member of a
   dependent class is value-dependent; the above doesn't cover the
   address of a static data member named with an unqualified-id.  */

static bool
has_value_dependent_address (tree op)
{
  STRIP_ANY_LOCATION_WRAPPER (op);

  /* We could use get_inner_reference here, but there's no need;
     this is only relevant for template non-type arguments, which
     can only be expressed as &id-expression.  */
  if (DECL_P (op))
    {
      tree ctx = CP_DECL_CONTEXT (op);
      if (TYPE_P (ctx) && dependent_type_p (ctx))
	return true;
    }

  return false;
}

/* The next set of functions are used for providing helpful explanatory
   diagnostics for failed overload resolution.
   Their messages should be indented by two spaces for consistency with the
   messages in call.c  */

/* Deduction succeeded; by convention unify-style functions return 0 for
   success and 1 for failure.  */

static int
unify_success (bool /*explain_p*/)
{
  return 0;
}

/* Other failure functions should call this one, to provide a single function
   for setting a breakpoint on.  */

static int
unify_invalid (bool /*explain_p*/)
{
  return 1;
}

static int
unify_parameter_deduction_failure (bool explain_p, tree parm)
{
  if (explain_p)
    inform (input_location,
	    "  couldn%'t deduce template parameter %qD", parm);
  return unify_invalid (explain_p);
}

static int
unify_cv_qual_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
	    "  types %qT and %qT have incompatible cv-qualifiers",
	    parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_type_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location, "  mismatched types %qT and %qT", parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_parameter_pack_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
	    "  template parameter %qD is not a parameter pack, but "
	    "argument %qD is",
	    parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_ptrmem_cst_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
	    "  template argument %qE does not match "
	    "pointer-to-member constant %qE",
	    arg, parm);
  return unify_invalid (explain_p);
}

static int
unify_expression_unequal (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location, "  %qE is not equivalent to %qE", parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_parameter_pack_inconsistent (bool explain_p, tree old_arg, tree new_arg)
{
  if (explain_p)
    inform (input_location,
	    "  inconsistent parameter pack deduction with %qT and %qT",
	    old_arg, new_arg);
  return unify_invalid (explain_p);
}

static int
unify_inconsistency (bool explain_p, tree parm, tree first, tree second)
{
  if (explain_p)
    {
      /* PARM may be a type or a non-type template parameter; pick the
	 diagnostic accordingly.  */
      if (TYPE_P (parm))
	inform (input_location,
		"  deduced conflicting types for parameter %qT (%qT and %qT)",
		parm, first, second);
      else
	inform (input_location,
		"  deduced conflicting values for non-type parameter "
		"%qE (%qE and %qE)", parm, first, second);
    }
  return unify_invalid (explain_p);
}

static int
unify_vla_arg (bool explain_p, tree arg)
{
  if (explain_p)
    inform (input_location,
	    "  variable-sized array type %qT is not "
	    "a valid template argument",
	    arg);
  return unify_invalid (explain_p);
}

static int
unify_method_type_error (bool explain_p, tree arg)
{
  if (explain_p)
    inform (input_location,
	    "  member function type %qT is not a valid template argument",
	    arg);
  return unify_invalid (explain_p);
}

/* Common helper for the too-many/too-few argument diagnostics; LEAST_P
   means WANTED is a lower bound rather than an exact count.  */

static int
unify_arity (bool explain_p, int have, int wanted, bool least_p = false)
{
  if (explain_p)
    {
      if (least_p)
	inform_n (input_location, wanted,
		  "  candidate expects at least %d argument, %d provided",
		  "  candidate expects at least %d arguments, %d provided",
		  wanted, have);
      else
	inform_n (input_location, wanted,
		  "  candidate expects %d argument, %d provided",
		  "  candidate expects %d arguments, %d provided",
		  wanted, have);
    }
  return unify_invalid (explain_p);
}

static int
unify_too_many_arguments (bool explain_p, int have, int wanted)
{
  return unify_arity (explain_p, have, wanted);
}

static int
unify_too_few_arguments (bool explain_p, int have, int wanted,
			 bool least_p = false)
{
  return unify_arity (explain_p, have, wanted, least_p);
}

static int
unify_arg_conversion (bool explain_p, tree to_type,
		      tree from_type, tree arg)
{
  if (explain_p)
    inform (cp_expr_loc_or_input_loc (arg),
	    "  cannot convert %qE (type %qT) to type %qT",
	    arg, from_type, to_type);
  return unify_invalid (explain_p);
}

static int
unify_no_common_base (bool explain_p, enum template_base_result r,
		      tree parm, tree arg)
{
  if (explain_p)
    switch (r)
      {
      case tbr_ambiguous_baseclass:
	inform (input_location, "  %qT is an ambiguous base class of %qT",
		parm, arg);
	break;

      default:
	inform (input_location, "  %qT is not derived from %qT", arg, parm);
	break;
      }
  return unify_invalid (explain_p);
}

static int
unify_inconsistent_template_template_parameters (bool explain_p)
{
  if (explain_p)
    inform (input_location,
	    "  template parameters of a template template argument are "
	    "inconsistent with other deduced template arguments");
  return unify_invalid (explain_p);
}

static int
unify_template_deduction_failure (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
	    "  cannot deduce a template for %qT from non-template type %qT",
	    parm, arg);
  return unify_invalid (explain_p);
}

static int
unify_template_argument_mismatch (bool explain_p, tree parm, tree arg)
{
  if (explain_p)
    inform (input_location,
	    "  template argument %qE does not match %qE", arg, parm);
  return unify_invalid (explain_p);
}

/* True if T is a C++20 template parameter object to store the argument for a
   template parameter of class type.  Such objects are artificial VAR_DECLs
   whose mangled name starts with "_ZTA" (see mangle_template_parm_object).  */

bool
template_parm_object_p (const_tree t)
{
  return (TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t)
	  && DECL_NAME (t)
	  && !strncmp (IDENTIFIER_POINTER (DECL_NAME (t)), "_ZTA", 4));
}

/* Subroutine of convert_nontype_argument, to check whether EXPR, as an
   argument for TYPE, points to an unsuitable object.

   Also adjust the type of the index in C++20 array subobject references.
*/

static bool
invalid_tparm_referent_p (tree type, tree expr, tsubst_flags_t complain)
{
  switch (TREE_CODE (expr))
    {
    /* Look through conversions, TARGET_EXPRs, and aggregate initializers
       to the underlying referent(s).  */
    CASE_CONVERT:
      return invalid_tparm_referent_p (type, TREE_OPERAND (expr, 0),
				       complain);

    case TARGET_EXPR:
      return invalid_tparm_referent_p (type, TARGET_EXPR_INITIAL (expr),
				       complain);

    case CONSTRUCTOR:
      {
	/* Check every element of an aggregate initializer.  */
	unsigned i; tree elt;
	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expr), i, elt)
	  if (invalid_tparm_referent_p (TREE_TYPE (elt), elt, complain))
	    return true;
      }
      break;

    case ADDR_EXPR:
      {
	tree decl = TREE_OPERAND (expr, 0);

	/* In C++20, addresses of subobjects are allowed; walk down to
	   the containing variable.  */
	if (cxx_dialect >= cxx20)
	  while (TREE_CODE (decl) == COMPONENT_REF
		 || TREE_CODE (decl) == ARRAY_REF)
	    {
	      tree &op = TREE_OPERAND (decl, 1);
	      if (TREE_CODE (decl) == ARRAY_REF
		  && TREE_CODE (op) == INTEGER_CST)
		/* Canonicalize array offsets to ptrdiff_t; how they were
		   written doesn't matter for subobject identity.  */
		op = fold_convert (ptrdiff_type_node, op);
	      decl = TREE_OPERAND (decl, 0);
	    }

	if (!VAR_P (decl))
	  {
	    if (complain & tf_error)
	      error_at (cp_expr_loc_or_input_loc (expr),
			"%qE is not a valid template argument of type %qT "
			"because %qE is not a variable", expr, type, decl);
	    return true;
	  }
	/* The linkage requirement loosened across dialects: external
	   linkage in C++98, any linkage in C++11..14, none in C++17.  */
	else if (cxx_dialect < cxx11 && !DECL_EXTERNAL_LINKAGE_P (decl))
	  {
	    if (complain & tf_error)
	      error_at (cp_expr_loc_or_input_loc (expr),
			"%qE is not a valid template argument of type %qT "
			"in C++98 because %qD does not have external linkage",
			expr, type, decl);
	    return true;
	  }
	else if ((cxx_dialect >= cxx11 && cxx_dialect < cxx17)
		 && decl_linkage (decl) == lk_none)
	  {
	    if (complain & tf_error)
	      error_at (cp_expr_loc_or_input_loc (expr),
			"%qE is not a valid template argument of type %qT "
			"because %qD has no linkage", expr, type, decl);
	    return true;
	  }
	/* C++17: For a non-type template-parameter of reference or pointer
	   type, the value of the constant expression shall not refer to (or
	   for a pointer type, shall not be the address of):
	   * a subobject (4.5),
	   * a temporary object (15.2),
	   * a string literal (5.13.5),
	   * the result of a typeid expression (8.2.8), or
	   * a predefined __func__ variable (11.4.1).  */
	else if (DECL_ARTIFICIAL (decl))
	  {
	    if (complain & tf_error)
	      error ("the address of %qD is not a valid template argument",
		     decl);
	    return true;
	  }
	else if (cxx_dialect < cxx20
		 && !(same_type_ignoring_top_level_qualifiers_p
		      (strip_array_types (TREE_TYPE (type)),
		       strip_array_types (TREE_TYPE (decl)))))
	  {
	    /* A type mismatch here means the address designates a
	       subobject, which pre-C++20 is not allowed.  */
	    if (complain & tf_error)
	      error ("the address of the %qT subobject of %qD is not a "
		     "valid template argument", TREE_TYPE (type), decl);
	    return true;
	  }
	else if (!TREE_STATIC (decl) && !DECL_EXTERNAL (decl))
	  {
	    if (complain & tf_error)
	      error ("the address of %qD is not a valid template argument "
		     "because it does not have static storage duration",
		     decl);
	    return true;
	  }
      }
      break;

    default:
      if (!INDIRECT_TYPE_P (type))
	/* We're only concerned about pointers and references here.  */;
      else if (cxx_dialect >= cxx11 && integer_zerop (expr))
	/* Null pointer values are OK in C++11.  */;
      else
	{
	  if (VAR_P (expr))
	    {
	      if (complain & tf_error)
		error ("%qD is not a valid template argument "
		       "because %qD is a variable, not the address of "
		       "a variable", expr, expr);
	      return true;
	    }
	  else
	    {
	      if (complain & tf_error)
		error ("%qE is not a valid template argument for %qT "
		       "because it is not the address of a variable",
		       expr, type);
	      return true;
	    }
	}
    }
  return false;
}

/* The template arguments corresponding to template parameter objects of types
   that contain pointers to members.  */

static GTY(()) hash_map<tree, tree> *tparm_obj_values;

/* Return a VAR_DECL for the C++20 template parameter object corresponding to
   template argument EXPR.
*/

static tree
get_template_parm_object (tree expr, tsubst_flags_t complain)
{
  if (TREE_CODE (expr) == TARGET_EXPR)
    expr = TARGET_EXPR_INITIAL (expr);

  if (!TREE_CONSTANT (expr))
    {
      /* Re-evaluate to produce the diagnostic explaining why it isn't
	 constant, but only if the caller wants errors.  */
      if ((complain & tf_error)
	  && require_rvalue_constant_expression (expr))
	cxx_constant_value (expr);
      return error_mark_node;
    }
  if (invalid_tparm_referent_p (TREE_TYPE (expr), expr, complain))
    return error_mark_node;

  /* The mangled name encodes the argument value, so equal arguments
     share one object; reuse any existing binding.  */
  tree name = mangle_template_parm_object (expr);
  tree decl = get_global_binding (name);
  if (decl)
    return decl;

  /* Build the const, static, constexpr object with COMDAT linkage so
     identical instantiations in other TUs merge.  */
  tree type = cp_build_qualified_type (TREE_TYPE (expr), TYPE_QUAL_CONST);
  decl = create_temporary_var (type);
  DECL_CONTEXT (decl) = NULL_TREE;
  TREE_STATIC (decl) = true;
  DECL_DECLARED_CONSTEXPR_P (decl) = true;
  TREE_READONLY (decl) = true;
  DECL_NAME (decl) = name;
  SET_DECL_ASSEMBLER_NAME (decl, name);
  comdat_linkage (decl);

  if (!zero_init_p (type))
    {
      /* If EXPR contains any PTRMEM_CST, they will get clobbered by
	 lower_var_init before we're done mangling.  So store the original
	 value elsewhere.  */
      tree copy = unshare_constructor (expr);
      hash_map_safe_put<hm_ggc> (tparm_obj_values, decl, copy);
    }

  pushdecl_top_level_and_finish (decl, expr);

  return decl;
}

/* Return the actual template argument corresponding to template parameter
   object VAR.  */

tree
tparm_object_argument (tree var)
{
  /* For zero-init-safe types DECL_INITIAL still holds the original value;
     otherwise it was stashed in tparm_obj_values (see
     get_template_parm_object).  */
  if (zero_init_p (TREE_TYPE (var)))
    return DECL_INITIAL (var);
  return *(tparm_obj_values->get (var));
}

/* Attempt to convert the non-type template parameter EXPR to the
   indicated TYPE.  If the conversion is successful, return the
   converted value.  If the conversion is unsuccessful, return
   NULL_TREE if we issued an error message, or error_mark_node if we
   did not.  We issue error messages for out-and-out bad template
   parameters, but not simply because the conversion failed, since we
   might be just trying to do argument deduction.  Both TYPE and EXPR
   must be non-dependent.

   The conversion follows the special rules described in
   [temp.arg.nontype], and it is much more strict than an implicit
   conversion.
   This function is called twice for each template argument (see
   lookup_template_class for a more accurate description of this
   problem). This means that we need to handle expressions which
   are not valid in a C++ source, but can be created from the
   first call (for instance, casts to perform conversions). These
   hacks can go away after we fix the double coercion problem.  */

static tree
convert_nontype_argument (tree type, tree expr, tsubst_flags_t complain)
{
  tree expr_type;
  location_t loc = cp_expr_loc_or_input_loc (expr);

  /* Detect immediately string literals as invalid non-type argument.
     This special-case is not needed for correctness (we would easily
     catch this later), but only to provide better diagnostic for this
     common user mistake. As suggested by DR 100, we do not mention
     linkage issues in the diagnostic as this is not the point.  */
  if (TREE_CODE (expr) == STRING_CST && !CLASS_TYPE_P (type))
    {
      if (complain & tf_error)
	error ("%qE is not a valid template argument for type %qT "
	       "because string literals can never be used in this context",
	       expr, type);
      return NULL_TREE;
    }

  /* Add the ADDR_EXPR now for the benefit of
     value_dependent_expression_p.  */
  if (TYPE_PTROBV_P (type)
      && TREE_CODE (TREE_TYPE (expr)) == ARRAY_TYPE)
    {
      expr = decay_conversion (expr, complain);
      if (expr == error_mark_node)
	return error_mark_node;
    }

  /* If we are in a template, EXPR may be non-dependent, but still
     have a syntactic, rather than semantic, form.  For example, EXPR
     might be a SCOPE_REF, rather than the VAR_DECL to which the
     SCOPE_REF refers.  Preserving the qualifying scope is necessary
     so that access checking can be performed when the template is
     instantiated -- but here we need the resolved form so that we can
     convert the argument.  */
  bool non_dep = false;
  if (TYPE_REF_OBJ_P (type)
      && has_value_dependent_address (expr))
    /* If we want the address and it's value-dependent, don't fold.  */;
  else if (processing_template_decl
	   && is_nondependent_constant_expression (expr))
    non_dep = true;
  if (error_operand_p (expr))
    return error_mark_node;
  expr_type = TREE_TYPE (expr);

  /* If the argument is non-dependent, perform any conversions in
     non-dependent context as well.  */
  processing_template_decl_sentinel s (non_dep);
  if (non_dep)
    expr = instantiate_non_dependent_expr_internal (expr, complain);

  /* Value-dependent arguments can only be canonicalized here; all the
     structural checks below are relaxed for them.  */
  const bool val_dep_p = value_dependent_expression_p (expr);
  if (val_dep_p)
    expr = canonicalize_expr_argument (expr, complain);

  /* 14.3.2/5: The null pointer{,-to-member} conversion is applied
     to a non-type argument of "nullptr".  */
  if (NULLPTR_TYPE_P (expr_type) && TYPE_PTR_OR_PTRMEM_P (type))
    expr = fold_simple (convert (type, expr));

  /* In C++11, integral or enumeration non-type template arguments can be
     arbitrary constant expressions.  Pointer and pointer to member
     arguments can be general constant expressions that evaluate to a null
     value, but otherwise still need to be of a specific form.  */
  if (cxx_dialect >= cxx11)
    {
      if (TREE_CODE (expr) == PTRMEM_CST && TYPE_PTRMEM_P (type))
	/* A PTRMEM_CST is already constant, and a valid template
	   argument for a parameter of pointer to member type, we just want
	   to leave it in that form rather than lower it to a
	   CONSTRUCTOR.  */;
      else if (INTEGRAL_OR_ENUMERATION_TYPE_P (type)
	       || cxx_dialect >= cxx17)
	{
	  /* C++17: A template-argument for a non-type template-parameter shall
	     be a converted constant expression (8.20) of the type of the
	     template-parameter.  */
	  expr = build_converted_constant_expr (type, expr, complain);
	  if (expr == error_mark_node)
	    /* Make sure we return NULL_TREE only if we have really issued
	       an error, as described above.  */
	    return (complain & tf_error) ? NULL_TREE : error_mark_node;
	  else if (TREE_CODE (expr) == IMPLICIT_CONV_EXPR)
	    {
	      IMPLICIT_CONV_EXPR_NONTYPE_ARG (expr) = true;
	      return expr;
	    }
	  expr = maybe_constant_value (expr, NULL_TREE,
				       /*manifestly_const_eval=*/true);
	  expr = convert_from_reference (expr);
	}
      else if (TYPE_PTR_OR_PTRMEM_P (type))
	{
	  /* Fold a general constant expression to see whether it is a
	     null pointer (or null pointer-to-member) value.  */
	  tree folded = maybe_constant_value (expr, NULL_TREE,
					      /*manifestly_const_eval=*/true);
	  if (TYPE_PTR_P (type) ? integer_zerop (folded)
	      : null_member_pointer_value_p (folded))
	    expr = folded;
	}
    }

  if (TYPE_REF_P (type))
    expr = mark_lvalue_use (expr);
  else
    expr = mark_rvalue_use (expr);

  /* HACK: Due to double coercion, we can get a
     NOP_EXPR<REFERENCE_TYPE>(ADDR_EXPR<POINTER_TYPE> (arg)) here,
     which is the tree that we built on the first call (see
     below when coercing to reference to object or to reference to
     function). We just strip everything and get to the arg.
     See g++.old-deja/g++.oliva/template4.C and g++.dg/template/nontype9.C
     for examples.  */
  if (TYPE_REF_OBJ_P (type) || TYPE_REFFN_P (type))
    {
      tree probe_type, probe = expr;
      if (REFERENCE_REF_P (probe))
	probe = TREE_OPERAND (probe, 0);
      probe_type = TREE_TYPE (probe);
      if (TREE_CODE (probe) == NOP_EXPR)
	{
	  /* ??? Maybe we could use convert_from_reference here, but we
	     would need to relax its constraints because the NOP_EXPR
	     could actually change the type to something more cv-qualified,
	     and this is not folded by convert_from_reference.  */
	  tree addr = TREE_OPERAND (probe, 0);
	  if (TYPE_REF_P (probe_type)
	      && TREE_CODE (addr) == ADDR_EXPR
	      && TYPE_PTR_P (TREE_TYPE (addr))
	      && (same_type_ignoring_top_level_qualifiers_p
		  (TREE_TYPE (probe_type),
		   TREE_TYPE (TREE_TYPE (addr)))))
	    {
	      expr = TREE_OPERAND (addr, 0);
	      expr_type = TREE_TYPE (probe_type);
	    }
	}
    }

  /* [temp.arg.nontype]/5, bullet 1

     For a non-type template-parameter of integral or enumeration type,
     integral promotions (_conv.prom_) and integral conversions
     (_conv.integral_) are applied.  */
  if (INTEGRAL_OR_ENUMERATION_TYPE_P (type)
      || TREE_CODE (type) == REAL_TYPE)
    {
      if (cxx_dialect < cxx11)
	{
	  tree t = build_converted_constant_expr (type, expr, complain);
	  t = maybe_constant_value (t);
	  if (t != error_mark_node)
	    expr = t;
	}

      if (!same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (expr)))
	return error_mark_node;

      /* Notice that there are constant expressions like '4 % 0' which
	 do not fold into integer constants.  */
      if (!CONSTANT_CLASS_P (expr) && !val_dep_p)
	{
	  if (complain & tf_error)
	    {
	      /* Track diagnostic counts so we can add a note pointing at
		 the template argument if anything was emitted.  */
	      int errs = errorcount, warns = warningcount + werrorcount;
	      if (!require_potential_constant_expression (expr))
		expr = error_mark_node;
	      else
		expr = cxx_constant_value (expr);
	      if (errorcount > errs || warningcount + werrorcount > warns)
		inform (loc, "in template argument for type %qT", type);
	      if (expr == error_mark_node)
		return NULL_TREE;
	      /* else cxx_constant_value complained but gave us
		 a real constant, so go ahead.  */
	      if (!CONSTANT_CLASS_P (expr))
		{
		  /* Some assemble time constant expressions like
		     (intptr_t)&&lab1 - (intptr_t)&&lab2 or
		     4 + (intptr_t)&&var satisfy reduced_constant_expression_p
		     as we can emit them into .rodata initializers of
		     variables, yet they can't fold into an INTEGER_CST at
		     compile time.  Refuse them here.  */
		  gcc_checking_assert (reduced_constant_expression_p (expr));
		  error_at (loc, "template argument %qE for type %qT not "
				 "a compile-time constant", expr, type);
		  return NULL_TREE;
		}
	    }
	  else
	    return NULL_TREE;
	}

      /* Avoid typedef problems.  */
      if (TREE_TYPE (expr) != type)
	expr = fold_convert (type, expr);
    }
  /* [temp.arg.nontype]/5, bullet 2

     For a non-type template-parameter of type pointer to object,
     qualification conversions (_conv.qual_) and the array-to-pointer
     conversion (_conv.array_) are applied.  */
  else if (TYPE_PTROBV_P (type))
    {
      tree decayed = expr;

      /* Look through any NOP_EXPRs around an ADDR_EXPR, whether they come
	 from decay_conversion or an explicit cast.  If it's a problematic
	 cast, we'll complain about it below.  */
      if (TREE_CODE (expr) == NOP_EXPR)
	{
	  tree probe = expr;
	  STRIP_NOPS (probe);
	  if (TREE_CODE (probe) == ADDR_EXPR
	      && TYPE_PTR_P (TREE_TYPE (probe)))
	    {
	      expr = probe;
	      expr_type = TREE_TYPE (expr);
	    }
	}

      /* [temp.arg.nontype]/1  (TC1 version, DR 49):

	 A template-argument for a non-type, non-template template-parameter
	 shall be one of: [...]

	 -- the name of a non-type template-parameter;
	 -- the address of an object or function with external linkage, [...]
	    expressed as "& id-expression" where the & is optional if the name
	    refers to a function or array, or if the corresponding
	    template-parameter is a reference.

	Here, we do not care about functions, as they are invalid anyway
	for a parameter of type pointer-to-object.  */

      if (val_dep_p)
	/* Non-type template parameters are OK.  */
	;
      else if (cxx_dialect >= cxx11 && integer_zerop (expr))
	/* Null pointer values are OK in C++11.  */;
      else if (TREE_CODE (expr) != ADDR_EXPR
	       && !INDIRECT_TYPE_P (expr_type))
	/* Other values, like integer constants, might be valid
	   non-type arguments of some other type.  */
	return error_mark_node;
      else if (invalid_tparm_referent_p (type, expr, complain))
	return NULL_TREE;

      expr = decayed;

      expr = perform_qualification_conversions (type, expr);
      if (expr == error_mark_node)
	return error_mark_node;
    }
  /* [temp.arg.nontype]/5, bullet 3

     For a non-type template-parameter of type reference to object, no
     conversions apply. The type referred to by the reference may be more
     cv-qualified than the (otherwise identical) type of the
     template-argument. The template-parameter is bound directly to the
     template-argument, which must be an lvalue.  */
  else if (TYPE_REF_OBJ_P (type))
    {
      if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (type),
						      expr_type))
	return error_mark_node;

      if (!at_least_as_qualified_p (TREE_TYPE (type), expr_type))
	{
	  if (complain & tf_error)
	    error ("%qE is not a valid template argument for type %qT "
		   "because of conflicts in cv-qualification", expr, type);
	  return NULL_TREE;
	}

      if (!lvalue_p (expr))
	{
	  if (complain & tf_error)
	    error ("%qE is not a valid template argument for type %qT "
		   "because it is not an lvalue", expr, type);
	  return NULL_TREE;
	}

      /* [temp.arg.nontype]/1

	 A template-argument for a non-type, non-template template-parameter
	 shall be one of: [...]

	 -- the address of an object or function with external linkage.  */
      if (INDIRECT_REF_P (expr)
	  && TYPE_REF_OBJ_P (TREE_TYPE (TREE_OPERAND (expr, 0))))
	{
	  expr = TREE_OPERAND (expr, 0);
	  if (DECL_P (expr))
	    {
	      if (complain & tf_error)
		error ("%q#D is not a valid template argument for type %qT "
		       "because a reference variable does not have a constant "
		       "address", expr, type);
	      return NULL_TREE;
	    }
	}

      if (TYPE_REF_OBJ_P (TREE_TYPE (expr)) && val_dep_p)
	/* OK, dependent reference.  We don't want to ask whether a DECL is
	   itself value-dependent, since what we want here is its address.  */;
      else
	{
	  expr = build_address (expr);

	  if (invalid_tparm_referent_p (type, expr, complain))
	    return NULL_TREE;
	}

      if (!same_type_p (type, TREE_TYPE (expr)))
	expr = build_nop (type, expr);
    }
  /* [temp.arg.nontype]/5, bullet 4

     For a non-type template-parameter of type pointer to function, only
     the function-to-pointer conversion (_conv.func_) is applied. If the
     template-argument represents a set of overloaded functions (or a
     pointer to such), the matching function is selected from the set
     (_over.over_).  */
  else if (TYPE_PTRFN_P (type))
    {
      /* If the argument is a template-id, we might not have enough
	 context information to decay the pointer.  */
      if (!type_unknown_p (expr_type))
	{
	  expr = decay_conversion (expr, complain);
	  if (expr == error_mark_node)
	    return error_mark_node;
	}

      if (cxx_dialect >= cxx11 && integer_zerop (expr))
	/* Null pointer values are OK in C++11.  */
	return perform_qualification_conversions (type, expr);

      expr = convert_nontype_argument_function (type, expr, complain);
      if (!expr || expr == error_mark_node)
	return expr;
    }
  /* [temp.arg.nontype]/5, bullet 5

     For a non-type template-parameter of type reference to function, no
     conversions apply. If the template-argument represents a set of
     overloaded functions, the matching function is selected from the
     set (_over.over_).  */
  else if (TYPE_REFFN_P (type))
    {
      if (TREE_CODE (expr) == ADDR_EXPR)
	{
	  if (complain & tf_error)
	    {
	      error ("%qE is not a valid template argument for type %qT "
		     "because it is a pointer", expr, type);
	      inform (input_location, "try using %qE instead",
		      TREE_OPERAND (expr, 0));
	    }
	  return NULL_TREE;
	}

      expr = convert_nontype_argument_function (type, expr, complain);
      if (!expr || expr == error_mark_node)
	return expr;
    }
  /* [temp.arg.nontype]/5, bullet 6

     For a non-type template-parameter of type pointer to member function,
     no conversions apply. If the template-argument represents a set of
     overloaded member functions, the matching member function is selected
     from the set (_over.over_).  */
  else if (TYPE_PTRMEMFUNC_P (type))
    {
      expr = instantiate_type (type, expr, tf_none);
      if (expr == error_mark_node)
	return error_mark_node;

      /* [temp.arg.nontype] bullet 1 says the pointer to member
         expression must be a pointer-to-member constant.  */
      if (!val_dep_p
	  && !check_valid_ptrmem_cst_expr (type, expr, complain))
	return NULL_TREE;

      /* Repeated conversion can't deal with a conversion that turns
	 PTRMEM_CST into a CONSTRUCTOR, so build up a new PTRMEM_CST
	 instead.  */
      if (fnptr_conv_p (type, TREE_TYPE (expr)))
	expr = make_ptrmem_cst (type, PTRMEM_CST_MEMBER (expr));
    }
  /* [temp.arg.nontype]/5, bullet 7

     For a non-type template-parameter of type pointer to data member,
     qualification conversions (_conv.qual_) are applied.  */
  else if (TYPE_PTRDATAMEM_P (type))
    {
      /* [temp.arg.nontype] bullet 1 says the pointer to member
         expression must be a pointer-to-member constant.  */
      if (!val_dep_p
	  && !check_valid_ptrmem_cst_expr (type, expr, complain))
	return NULL_TREE;

      expr = perform_qualification_conversions (type, expr);
      if (expr == error_mark_node)
	return expr;
    }
  else if (NULLPTR_TYPE_P (type))
    {
      if (!NULLPTR_TYPE_P (TREE_TYPE (expr)))
	{
	  if (complain & tf_error)
	    error ("%qE is not a valid template argument for type %qT "
		   "because it is of type %qT", expr, type, TREE_TYPE (expr));
	  return NULL_TREE;
	}
      return expr;
    }
  else if (CLASS_TYPE_P (type))
    {
      /* Replace the argument with a reference to the corresponding template
	 parameter object.  */
      if (!val_dep_p)
	expr = get_template_parm_object (expr, complain);
      if (expr == error_mark_node)
	return NULL_TREE;
    }
  /* A template non-type parameter must be one of the above.  */
  else
    gcc_unreachable ();

  /* Sanity check: did we actually convert the argument to the
     right type?  */
  gcc_assert (same_type_ignoring_top_level_qualifiers_p
	      (type, TREE_TYPE (expr)));
  return convert_from_reference (expr);
}

/* Subroutine of coerce_template_template_parms, which returns 1 if
   PARM_PARM and ARG_PARM match using the rule for the template
   parameters of template template parameters. Both PARM and ARG are
   template parameters; the rest of the arguments are the same as for
   coerce_template_template_parms.
*/

static int
coerce_template_template_parm (tree parm,
			       tree arg,
			       tsubst_flags_t complain,
			       tree in_decl,
			       tree outer_args)
{
  if (arg == NULL_TREE || error_operand_p (arg)
      || parm == NULL_TREE || error_operand_p (parm))
    return 0;

  /* The two parameters must be the same kind (type, template, or
     non-type) to match at all.  */
  if (TREE_CODE (arg) != TREE_CODE (parm))
    return 0;

  switch (TREE_CODE (parm))
    {
    case TEMPLATE_DECL:
      /* We encounter instantiations of templates like
	 template <template <template <class> class> class TT>
	 class C;  */
      {
	tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm);
	tree argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg);

	if (!coerce_template_template_parms
	    (parmparm, argparm, complain, in_decl, outer_args))
	  return 0;
      }
      /* Fall through.  */

    case TYPE_DECL:
      if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (arg))
	  && !TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))
	/* Argument is a parameter pack but parameter is not.  */
	return 0;
      break;

    case PARM_DECL:
      /* The tsubst call is used to handle cases such as

	   template <int> class C {};
	   template <class T, template <T> class TT> class D {};
	   D<int, C> d;

	 i.e. the parameter list of TT depends on earlier parameters.  */
      if (!uses_template_parms (TREE_TYPE (arg)))
	{
	  tree t = tsubst (TREE_TYPE (parm), outer_args, complain, in_decl);
	  if (!uses_template_parms (t)
	      && !same_type_p (t, TREE_TYPE (arg)))
	    return 0;
	}

      if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (arg))
	  && !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
	/* Argument is a parameter pack but parameter is not.  */
	return 0;

      break;

    default:
      gcc_unreachable ();
    }

  return 1;
}

/* Coerce template argument list ARGLIST for use with template
   template-parameter TEMPL.  */

static tree
coerce_template_args_for_ttp (tree templ, tree arglist,
			      tsubst_flags_t complain)
{
  /* Consider an example where a template template parameter declared as

     template <class T, class U = std::allocator<T> > class TT

     The template parameter level of T and U are one level larger than
     of TT.  To proper process the default argument of U, say when an
     instantiation `TT<int>' is seen, we need to build the full
     arguments containing {int} as the innermost level.  Outer levels,
     available when not appearing as default template argument, can be
     obtained from the arguments of the enclosing template.

     Suppose that TT is later substituted with std::vector.  The above
     instantiation is `TT<int, std::allocator<T> >' with TT at
     level 1, and T at level 2, while the template arguments at level 1
     becomes {std::vector} and the inner level 2 is {int}.  */

  tree outer = DECL_CONTEXT (templ);
  if (outer)
    outer = generic_targs_for (outer);
  else if (current_template_parms)
    {
      /* This is an argument of the current template, so we haven't set
	 DECL_CONTEXT yet.  */
      tree relevant_template_parms;

      /* Parameter levels that are greater than the level of the given
	 template template parm are irrelevant.  */
      relevant_template_parms = current_template_parms;
      while (TMPL_PARMS_DEPTH (relevant_template_parms)
	     != TEMPLATE_TYPE_LEVEL (TREE_TYPE (templ)))
	relevant_template_parms = TREE_CHAIN (relevant_template_parms);

      outer = template_parms_to_args (relevant_template_parms);
    }

  if (outer)
    arglist = add_to_template_args (outer, arglist);

  tree parmlist = DECL_INNERMOST_TEMPLATE_PARMS (templ);
  return coerce_template_parms (parmlist, arglist, templ, complain,
				/*require_all_args=*/true,
				/*use_default_args=*/true);
}

/* A cache of template template parameters with match-all default
   arguments.  */
static GTY((deletable)) hash_map<tree,tree> *defaulted_ttp_cache;

/* T is a bound template template-parameter.  Copy its arguments into default
   arguments of the template template-parameter's template parameters.
*/

/* Build (and cache) a variant of the template template-parameter OTMPL in
   which every non-pack template parameter has been given the match-all
   default argument ANY_TARG_NODE.  Results are memoized in
   DEFAULTED_TTP_CACHE so each OTMPL is rewritten only once.  */

static tree
add_defaults_to_ttp (tree otmpl)
{
  if (tree *c = hash_map_safe_get (defaulted_ttp_cache, otmpl))
    return *c;

  tree ntmpl = copy_node (otmpl);

  /* Give the copy its own type node, pointing back at the new decl.  */
  tree ntype = copy_node (TREE_TYPE (otmpl));
  TYPE_STUB_DECL (ntype) = TYPE_NAME (ntype) = ntmpl;
  TYPE_MAIN_VARIANT (ntype) = ntype;
  TYPE_POINTER_TO (ntype) = TYPE_REFERENCE_TO (ntype) = NULL_TREE;
  TYPE_NAME (ntype) = ntmpl;
  SET_TYPE_STRUCTURAL_EQUALITY (ntype);

  tree idx = TEMPLATE_TYPE_PARM_INDEX (ntype)
    = copy_node (TEMPLATE_TYPE_PARM_INDEX (ntype));
  TEMPLATE_PARM_DECL (idx) = ntmpl;
  TREE_TYPE (ntmpl) = TREE_TYPE (idx) = ntype;

  /* Copy the innermost parameter vector and plant the match-all default
     (TREE_PURPOSE holds a parameter's default argument) on each non-pack
     parameter; packs are left alone.  */
  tree oparms = DECL_TEMPLATE_PARMS (otmpl);
  tree parms = DECL_TEMPLATE_PARMS (ntmpl) = copy_node (oparms);
  TREE_CHAIN (parms) = TREE_CHAIN (oparms);
  tree vec = TREE_VALUE (parms) = copy_node (TREE_VALUE (parms));
  for (int i = 0; i < TREE_VEC_LENGTH (vec); ++i)
    {
      tree o = TREE_VEC_ELT (vec, i);
      if (!template_parameter_pack_p (TREE_VALUE (o)))
	{
	  tree n = TREE_VEC_ELT (vec, i) = copy_node (o);
	  TREE_PURPOSE (n) = any_targ_node;
	}
    }

  hash_map_safe_put<hm_ggc> (defaulted_ttp_cache, otmpl, ntmpl);
  return ntmpl;
}

/* ARG is a bound potential template template-argument, and PARGS is a
   list of arguments for the corresponding template template-parameter.
   Adjust PARGS as appropriate for application to ARG's template, and
   if ARG is a BOUND_TEMPLATE_TEMPLATE_PARM, possibly adjust it to
   add default template arguments to the template template parameter.  */

static tree
coerce_ttp_args_for_tta (tree& arg, tree pargs, tsubst_flags_t complain)
{
  ++processing_template_decl;
  tree arg_tmpl = TYPE_TI_TEMPLATE (arg);
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (arg_tmpl))
    {
      /* When comparing two template template-parameters in partial ordering,
	 rewrite the one currently being used as an argument to have default
	 arguments for all parameters.  */
      arg_tmpl = add_defaults_to_ttp (arg_tmpl);
      pargs = coerce_template_args_for_ttp (arg_tmpl, pargs, complain);
      if (pargs != error_mark_node)
	arg = bind_template_template_parm (TREE_TYPE (arg_tmpl),
					   TYPE_TI_ARGS (arg));
    }
  else
    {
      tree aparms
	= INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (arg_tmpl));
      pargs = coerce_template_parms (aparms, pargs, arg_tmpl, complain,
				     /*require_all*/true,
				     /*use_default*/true);
    }
  --processing_template_decl;
  return pargs;
}

/* Subroutine of unify for the case when PARM is a
   BOUND_TEMPLATE_TEMPLATE_PARM.  Returns 0 on success, 1 on failure
   (the unify_* helpers encode the diagnostic when EXPLAIN_P).  */

static int
unify_bound_ttp_args (tree tparms, tree targs, tree parm, tree& arg,
		      bool explain_p)
{
  tree parmvec = TYPE_TI_ARGS (parm);
  tree argvec = INNERMOST_TEMPLATE_ARGS (TYPE_TI_ARGS (arg));

  /* The template template parm might be variadic and the argument
     not, so flatten both argument lists.  */
  parmvec = expand_template_argument_pack (parmvec);
  argvec = expand_template_argument_pack (argvec);

  if (flag_new_ttp)
    {
      /* In keeping with P0522R0, adjust P's template arguments
	 to apply to A's template; then flatten it again.  */
      tree nparmvec = coerce_ttp_args_for_tta (arg, parmvec, tf_none);
      nparmvec = expand_template_argument_pack (nparmvec);

      if (unify (tparms, targs, nparmvec, argvec,
		 UNIFY_ALLOW_NONE, explain_p))
	return 1;

      /* If the P0522 adjustment eliminated a pack expansion, deduce
	 empty packs.  */
      if (flag_new_ttp
	  && TREE_VEC_LENGTH (nparmvec) < TREE_VEC_LENGTH (parmvec)
	  && unify_pack_expansion (tparms, targs, parmvec, argvec,
				   DEDUCE_EXACT, /*sub*/true, explain_p))
	return 1;
    }
  else
    {
      /* Deduce arguments T, i from TT<T> or TT<i>.
	 We check each element of PARMVEC and ARGVEC individually
	 rather than the whole TREE_VEC since they can have
	 different number of elements, which is allowed under N2555.  */

      int len = TREE_VEC_LENGTH (parmvec);

      /* Check if the parameters end in a pack, making them variadic.  */
      int parm_variadic_p = 0;
      if (len > 0
	  && PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, len - 1)))
	parm_variadic_p = 1;

      for (int i = 0; i < len - parm_variadic_p; ++i)
	/* If the template argument list of P contains a pack expansion that
	   is not the last template argument, the entire template argument
	   list is a non-deduced context.  */
	if (PACK_EXPANSION_P (TREE_VEC_ELT (parmvec, i)))
	  return unify_success (explain_p);

      if (TREE_VEC_LENGTH (argvec) < len - parm_variadic_p)
	return unify_too_few_arguments (explain_p,
					TREE_VEC_LENGTH (argvec), len);

      for (int i = 0; i < len - parm_variadic_p; ++i)
	if (unify (tparms, targs,
		   TREE_VEC_ELT (parmvec, i),
		   TREE_VEC_ELT (argvec, i),
		   UNIFY_ALLOW_NONE, explain_p))
	  return 1;

      if (parm_variadic_p
	  && unify_pack_expansion (tparms, targs,
				   parmvec, argvec,
				   DEDUCE_EXACT,
				   /*subr=*/true, explain_p))
	return 1;
    }

  return 0;
}

/* Return 1 if PARM_PARMS and ARG_PARMS matches using rule for
   template template parameters.  Both PARM_PARMS and ARG_PARMS are
   vectors of TREE_LIST nodes containing TYPE_DECL, TEMPLATE_DECL
   or PARM_DECL.

   Consider the example:
     template <class T> class A;
     template<template <class U> class TT> class B;

   For B<A>, PARM_PARMS are the parameters to TT, while ARG_PARMS are
   the parameters to A, and OUTER_ARGS contains A.
*/

static int
coerce_template_template_parms (tree parm_parms,
				tree arg_parms,
				tsubst_flags_t complain,
				tree in_decl,
				tree outer_args)
{
  int nparms, nargs, i;
  tree parm, arg;
  int variadic_p = 0;

  gcc_assert (TREE_CODE (parm_parms) == TREE_VEC);
  gcc_assert (TREE_CODE (arg_parms) == TREE_VEC);

  nparms = TREE_VEC_LENGTH (parm_parms);
  nargs = TREE_VEC_LENGTH (arg_parms);

  if (flag_new_ttp)
    {
      /* P0522R0: A template template-parameter P is at least as specialized
	 as a template template-argument A if, given the following rewrite to
	 two function templates, the function template corresponding to P is
	 at least as specialized as the function template corresponding to A
	 according to the partial ordering rules for function templates
	 ([temp.func.order]).  Given an invented class template X with the
	 template parameter list of A (including default arguments):

	 * Each of the two function templates has the same template
	 parameters, respectively, as P or A.

	 * Each function template has a single function parameter whose type
	 is a specialization of X with template arguments corresponding to
	 the template parameters from the respective function template where,
	 for each template parameter PP in the template parameter list of the
	 function template, a corresponding template argument AA is formed.
	 If PP declares a parameter pack, then AA is the pack expansion
	 PP... ([temp.variadic]); otherwise, AA is the id-expression PP.

	 If the rewrite produces an invalid type, then P is not at least as
	 specialized as A.  */

      /* So coerce P's args to apply to A's parms, and then deduce between A's
	 args and the converted args.  If that succeeds, A is at least as
	 specialized as P, so they match.  */
      tree pargs = template_parms_level_to_args (parm_parms);
      pargs = add_outermost_template_args (outer_args, pargs);
      ++processing_template_decl;
      pargs = coerce_template_parms (arg_parms, pargs, NULL_TREE, tf_none,
				     /*require_all*/true, /*use_default*/true);
      --processing_template_decl;
      if (pargs != error_mark_node)
	{
	  tree targs = make_tree_vec (nargs);
	  tree aargs = template_parms_level_to_args (arg_parms);
	  if (!unify (arg_parms, targs, aargs, pargs, UNIFY_ALLOW_NONE,
		      /*explain*/false))
	    return 1;
	}
      /* NOTE: on P0522 failure we fall through to the pre-P0522 exact-match
	 check below rather than failing outright.  */
    }

  /* Determine whether we have a parameter pack at the end of the
     template template parameter's template parameter list.  */
  if (TREE_VEC_ELT (parm_parms, nparms - 1) != error_mark_node)
    {
      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, nparms - 1));

      if (error_operand_p (parm))
	return 0;

      switch (TREE_CODE (parm))
	{
	case TEMPLATE_DECL:
	case TYPE_DECL:
	  if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (parm)))
	    variadic_p = 1;
	  break;

	case PARM_DECL:
	  if (TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
	    variadic_p = 1;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (nargs != nparms
      && !(variadic_p && nargs >= nparms - 1))
    return 0;

  /* Check all of the template parameters except the parameter pack at
     the end (if any).  */
  for (i = 0; i < nparms - variadic_p; ++i)
    {
      if (TREE_VEC_ELT (parm_parms, i) == error_mark_node
	  || TREE_VEC_ELT (arg_parms, i) == error_mark_node)
	continue;

      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i));
      arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i));

      if (!coerce_template_template_parm (parm, arg, complain, in_decl,
					  outer_args))
	return 0;
    }

  if (variadic_p)
    {
      /* Check each of the template parameters in the template
	 argument against the template parameter pack at the end of
	 the template template parameter.  */
      if (TREE_VEC_ELT (parm_parms, i) == error_mark_node)
	return 0;

      parm = TREE_VALUE (TREE_VEC_ELT (parm_parms, i));

      for (; i < nargs; ++i)
	{
	  if (TREE_VEC_ELT (arg_parms, i) == error_mark_node)
	    continue;

	  arg = TREE_VALUE (TREE_VEC_ELT (arg_parms, i));

	  if (!coerce_template_template_parm (parm, arg, complain, in_decl,
					      outer_args))
	    return 0;
	}
    }

  return 1;
}

/* Verifies that the deduced template arguments (in TARGS) for the
   template template parameters (in TPARMS) represent valid bindings,
   by comparing the template parameter list of each template argument
   to the template parameter list of its corresponding template
   template parameter, in accordance with DR150.  This
   routine can only be called after all template arguments have been
   deduced.  It will return TRUE if all of the template template
   parameter bindings are okay, FALSE otherwise.  */

bool
template_template_parm_bindings_ok_p (tree tparms, tree targs)
{
  int i, ntparms = TREE_VEC_LENGTH (tparms);
  bool ret = true;

  /* We're dealing with template parms in this process.  */
  ++processing_template_decl;

  targs = INNERMOST_TEMPLATE_ARGS (targs);

  for (i = 0; i < ntparms; ++i)
    {
      tree tparm = TREE_VALUE (TREE_VEC_ELT (tparms, i));
      tree targ = TREE_VEC_ELT (targs, i);

      if (TREE_CODE (tparm) == TEMPLATE_DECL && targ)
	{
	  tree packed_args = NULL_TREE;
	  int idx, len = 1;

	  if (ARGUMENT_PACK_P (targ))
	    {
	      /* Look inside the argument pack.  */
	      packed_args = ARGUMENT_PACK_ARGS (targ);
	      len = TREE_VEC_LENGTH (packed_args);
	    }

	  for (idx = 0; idx < len; ++idx)
	    {
	      tree targ_parms = NULL_TREE;

	      if (packed_args)
		/* Extract the next argument from the argument
		   pack.  */
		targ = TREE_VEC_ELT (packed_args, idx);

	      if (PACK_EXPANSION_P (targ))
		/* Look at the pattern of the pack expansion.  */
		targ = PACK_EXPANSION_PATTERN (targ);

	      /* Extract the template parameters from the template
		 argument.  */
	      if (TREE_CODE (targ) == TEMPLATE_DECL)
		targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (targ);
	      else if (TREE_CODE (targ) == TEMPLATE_TEMPLATE_PARM)
		targ_parms = DECL_INNERMOST_TEMPLATE_PARMS (TYPE_NAME (targ));

	      /* Verify that we can coerce the template template
		 parameters from the template argument to the template
		 parameter.  This requires an exact match.  */
	      if (targ_parms
		  && !coerce_template_template_parms
		       (DECL_INNERMOST_TEMPLATE_PARMS (tparm),
			targ_parms,
			tf_none,
			tparm,
			targs))
		{
		  ret = false;
		  goto out;
		}
	    }
	}
    }

 out:

  --processing_template_decl;
  return ret;
}

/* Since type attributes aren't mangled, we need to strip them from
   template type arguments.  */

tree
canonicalize_type_argument (tree arg, tsubst_flags_t complain)
{
  if (!arg || arg == error_mark_node || arg == TYPE_CANONICAL (arg))
    return arg;
  bool removed_attributes = false;
  tree canon = strip_typedefs (arg, &removed_attributes);
  if (removed_attributes
      && (complain & tf_warning))
    warning (OPT_Wignored_attributes,
	     "ignoring attributes on template argument %qT", arg);
  return canon;
}

/* And from inside dependent non-type arguments like sizeof(Type).  */

static tree
canonicalize_expr_argument (tree arg, tsubst_flags_t complain)
{
  if (!arg || arg == error_mark_node)
    return arg;
  bool removed_attributes = false;
  tree canon = strip_typedefs_expr (arg, &removed_attributes);
  if (removed_attributes
      && (complain & tf_warning))
    warning (OPT_Wignored_attributes,
	     "ignoring attributes in template argument %qE", arg);
  return canon;
}

/* A template declaration can be substituted for a constrained
   template template parameter only when the argument is no more
   constrained than the parameter.  */

static bool
is_compatible_template_arg (tree parm, tree arg)
{
  tree parm_cons = get_constraints (parm);

  /* For now, allow constrained template template arguments
     and unconstrained template template parameters.  */

  if (parm_cons == NULL_TREE)
    return true;

  /* If the template parameter is constrained, we need to rewrite its
     constraints in terms of the ARG's template parameters.  This ensures
     that all of the template parameter types will have the same depth.

     Note that this is only valid when coerce_template_template_parm is
     true for the innermost template parameters of PARM and ARG.  In other
     words, because coercion is successful, this conversion will be valid.  */
  tree new_args = NULL_TREE;
  if (parm_cons)
    {
      tree aparms = DECL_INNERMOST_TEMPLATE_PARMS (arg);
      new_args = template_parms_level_to_args (aparms);
      parm_cons = tsubst_constraint_info (parm_cons, new_args,
					  tf_none, NULL_TREE);
      if (parm_cons == error_mark_node)
	return false;
    }

  return weakly_subsumes (parm_cons, new_args, arg);
}

// Convert a placeholder argument into a binding to the original
// parameter.  The original parameter is saved as the TREE_TYPE of
// ARG.
static inline tree
convert_wildcard_argument (tree parm, tree arg)
{
  TREE_TYPE (arg) = parm;
  return arg;
}

/* We can't fully resolve ARG given as a non-type template argument to TYPE,
   because one of them is dependent.  But we need to represent the
   conversion for the benefit of cp_tree_equal.  */

static tree
maybe_convert_nontype_argument (tree type, tree arg)
{
  /* Auto parms get no conversion.  */
  if (type_uses_auto (type))
    return arg;
  /* We don't need or want to add this conversion now if we're going to use
     the argument for deduction.  */
  if (value_dependent_expression_p (arg))
    return arg;

  type = cv_unqualified (type);
  tree argtype = TREE_TYPE (arg);
  if (same_type_p (type, argtype))
    return arg;

  arg = build1 (IMPLICIT_CONV_EXPR, type, arg);
  IMPLICIT_CONV_EXPR_NONTYPE_ARG (arg) = true;
  return arg;
}

/* Convert the indicated template ARG as necessary to match the
   indicated template PARM.  Returns the converted ARG, or
   error_mark_node if the conversion was unsuccessful.  Error and
   warning messages are issued under control of COMPLAIN.
   This conversion is for the Ith parameter in the parameter list.  ARGS is
   the full set of template arguments deduced so far.  */

static tree
convert_template_argument (tree parm,
			   tree arg,
			   tree args,
			   tsubst_flags_t complain,
			   int i,
			   tree in_decl)
{
  tree orig_arg;
  tree val;
  int is_type, requires_type, is_tmpl_type, requires_tmpl_type;

  if (parm == error_mark_node || error_operand_p (arg))
    return error_mark_node;

  /* Trivially convert placeholders.  */
  if (TREE_CODE (arg) == WILDCARD_DECL)
    return convert_wildcard_argument (parm, arg);

  if (arg == any_targ_node)
    return arg;

  if (TREE_CODE (arg) == TREE_LIST
      && TREE_CODE (TREE_VALUE (arg)) == OFFSET_REF)
    {
      /* The template argument was the name of some
	 member function.  That's usually
	 invalid, but static members are OK.  In any
	 case, grab the underlying fields/functions
	 and issue an error later if required.  */
      TREE_TYPE (arg) = unknown_type_node;
    }

  orig_arg = arg;

  /* Classify what kind of entity the parameter expects...  */
  requires_tmpl_type = TREE_CODE (parm) == TEMPLATE_DECL;
  requires_type = (TREE_CODE (parm) == TYPE_DECL
		   || requires_tmpl_type);

  /* When determining whether an argument pack expansion is a template,
     look at the pattern.  */
  if (TREE_CODE (arg) == TYPE_PACK_EXPANSION)
    arg = PACK_EXPANSION_PATTERN (arg);

  /* Deal with an injected-class-name used as a template template arg.  */
  if (requires_tmpl_type && CLASS_TYPE_P (arg))
    {
      tree t = maybe_get_template_decl_from_type_decl (TYPE_NAME (arg));
      if (TREE_CODE (t) == TEMPLATE_DECL)
	{
	  if (cxx_dialect >= cxx11)
	    /* OK under DR 1004.  */;
	  else if (complain & tf_warning_or_error)
	    pedwarn (input_location, OPT_Wpedantic, "injected-class-name %qD"
		     " used as template template argument", TYPE_NAME (arg));
	  else if (flag_pedantic_errors)
	    t = arg;

	  arg = t;
	}
    }

  /* ...and what kind of entity the argument actually is.  */
  is_tmpl_type =
    ((TREE_CODE (arg) == TEMPLATE_DECL
      && TREE_CODE (DECL_TEMPLATE_RESULT (arg)) == TYPE_DECL)
     || (requires_tmpl_type && TREE_CODE (arg) == TYPE_ARGUMENT_PACK)
     || TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
     || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE);

  if (is_tmpl_type
      && (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
	  || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE))
    arg = TYPE_STUB_DECL (arg);

  is_type = TYPE_P (arg) || is_tmpl_type;

  if (requires_type && ! is_type && TREE_CODE (arg) == SCOPE_REF
      && TREE_CODE (TREE_OPERAND (arg, 0)) == TEMPLATE_TYPE_PARM)
    {
      if (TREE_CODE (TREE_OPERAND (arg, 1)) == BIT_NOT_EXPR)
	{
	  if (complain & tf_error)
	    error ("invalid use of destructor %qE as a type", orig_arg);
	  return error_mark_node;
	}

      permerror (input_location,
		 "to refer to a type member of a template parameter, "
		 "use %<typename %E%>", orig_arg);

      orig_arg = make_typename_type (TREE_OPERAND (arg, 0),
				     TREE_OPERAND (arg, 1),
				     typename_type,
				     complain);
      arg = orig_arg;
      is_type = 1;
    }
  if (is_type != requires_type)
    {
      if (in_decl)
	{
	  if (complain & tf_error)
	    {
	      error ("type/value mismatch at argument %d in template "
		     "parameter list for %qD",
		     i + 1, in_decl);
	      if (is_type)
		{
		  /* The template argument is a type, but we're expecting
		     an expression.  */
		  inform (input_location,
			  "  expected a constant of type %qT, got %qT",
			  TREE_TYPE (parm),
			  (DECL_P (arg) ? DECL_NAME (arg) : orig_arg));
		  /* [temp.arg]/2: "In a template-argument, an ambiguity
		     between a type-id and an expression is resolved to a
		     type-id, regardless of the form of the corresponding
		     template-parameter."  So give the user a clue.  */
		  if (TREE_CODE (arg) == FUNCTION_TYPE)
		    inform (input_location, "  ambiguous template argument "
			    "for non-type template parameter is treated as "
			    "function type");
		}
	      else if (requires_tmpl_type)
		inform (input_location,
			"  expected a class template, got %qE", orig_arg);
	      else
		inform (input_location,
			"  expected a type, got %qE", orig_arg);
	    }
	}
      return error_mark_node;
    }
  if (is_tmpl_type ^ requires_tmpl_type)
    {
      if (in_decl && (complain & tf_error))
	{
	  error ("type/value mismatch at argument %d in template "
		 "parameter list for %qD",
		 i + 1, in_decl);
	  if (is_tmpl_type)
	    inform (input_location,
		    "  expected a type, got %qT", DECL_NAME (arg));
	  else
	    inform (input_location,
		    "  expected a class template, got %qT", orig_arg);
	}
      return error_mark_node;
    }

  if (template_parameter_pack_p (parm) && ARGUMENT_PACK_P (orig_arg))
    /* We already did the appropriate conversion when packing args.  */
    val = orig_arg;
  else if (is_type)
    {
      if (requires_tmpl_type)
	{
	  if (TREE_CODE (TREE_TYPE (arg)) == UNBOUND_CLASS_TEMPLATE)
	    /* The number of argument required is not known yet.
	       Just accept it for now.  */
	    val = orig_arg;
	  else
	    {
	      tree parmparm = DECL_INNERMOST_TEMPLATE_PARMS (parm);
	      tree argparm;

	      /* Strip alias templates that are equivalent to another
		 template.  */
	      arg = get_underlying_template (arg);
	      argparm = DECL_INNERMOST_TEMPLATE_PARMS (arg);

	      if (coerce_template_template_parms (parmparm, argparm,
						  complain, in_decl,
						  args))
		{
		  val = arg;

		  /* TEMPLATE_TEMPLATE_PARM node is preferred over
		     TEMPLATE_DECL.  */
		  if (val != error_mark_node)
		    {
		      if (DECL_TEMPLATE_TEMPLATE_PARM_P (val))
			val = TREE_TYPE (val);
		      if (TREE_CODE (orig_arg) == TYPE_PACK_EXPANSION)
			val = make_pack_expansion (val, complain);
		    }
		}
	      else
		{
		  if (in_decl && (complain & tf_error))
		    {
		      error ("type/value mismatch at argument %d in "
			     "template parameter list for %qD",
			     i + 1, in_decl);
		      inform (input_location,
			      "  expected a template of type %qD, got %qT",
			      parm, orig_arg);
		    }

		  val = error_mark_node;
		}

	      // Check that the constraints are compatible before allowing the
	      // substitution.
	      if (val != error_mark_node)
		if (!is_compatible_template_arg (parm, arg))
		  {
		    if (in_decl && (complain & tf_error))
		      {
			error ("constraint mismatch at argument %d in "
			       "template parameter list for %qD",
			       i + 1, in_decl);
			inform (input_location, "  expected %qD but got %qD",
				parm, arg);
		      }
		    val = error_mark_node;
		  }
	    }
	}
      else
	val = orig_arg;
      /* We only form one instance of each template specialization.
	 Therefore, if we use a non-canonical variant (i.e., a
	 typedef), any future messages referring to the type will use
	 the typedef, which is confusing if those future uses do not
	 themselves also use the typedef.  */
      if (TYPE_P (val))
	val = canonicalize_type_argument (val, complain);
    }
  else
    {
      /* Non-type template parameter: substitute into its declared type,
	 then convert the argument to that type.  */
      tree t = TREE_TYPE (parm);

      if (TEMPLATE_PARM_LEVEL (get_template_parm_index (parm))
	  > TMPL_ARGS_DEPTH (args))
	/* We don't have enough levels of args to do any substitution.  This
	   can happen in the context of -fnew-ttp-matching.  */;
      else if (tree a = type_uses_auto (t))
	{
	  t = do_auto_deduction (t, arg, a, complain, adc_unify, args);
	  if (t == error_mark_node)
	    return error_mark_node;
	}
      else
	t = tsubst (t, args, complain, in_decl);

      if (invalid_nontype_parm_type_p (t, complain))
	return error_mark_node;

      if (t != TREE_TYPE (parm))
	t = canonicalize_type_argument (t, complain);

      if (!type_dependent_expression_p (orig_arg)
	  && !uses_template_parms (t))
	/* We used to call digest_init here.  However, digest_init
	   will report errors, which we don't want when complain
	   is zero.  More importantly, digest_init will try too
	   hard to convert things: for example, `0' should not be
	   converted to pointer type at this point according to
	   the standard.  Accepting this is not merely an
	   extension, since deciding whether or not these
	   conversions can occur is part of determining which
	   function template to call, or whether a given explicit
	   argument specification is valid.  */
	val = convert_nontype_argument (t, orig_arg, complain);
      else
	{
	  val = canonicalize_expr_argument (orig_arg, complain);
	  val = maybe_convert_nontype_argument (t, val);
	}

      if (val == NULL_TREE)
	val = error_mark_node;
      else if (val == error_mark_node && (complain & tf_error))
	error_at (cp_expr_loc_or_input_loc (orig_arg),
		  "could not convert template argument %qE from %qT to %qT",
		  orig_arg, TREE_TYPE (orig_arg), t);

      if (INDIRECT_REF_P (val))
	{
	  /* Reject template arguments that are references to built-in
	     functions with no library fallbacks.  */
	  const_tree inner = TREE_OPERAND (val, 0);
	  const_tree innertype = TREE_TYPE (inner);
	  if (innertype
	      && TYPE_REF_P (innertype)
	      && TREE_CODE (TREE_TYPE (innertype)) == FUNCTION_TYPE
	      && TREE_OPERAND_LENGTH (inner) > 0
	      && reject_gcc_builtin (TREE_OPERAND (inner, 0)))
	    return error_mark_node;
	}

      if (TREE_CODE (val) == SCOPE_REF)
	{
	  /* Strip typedefs from the SCOPE_REF.  */
	  tree type = canonicalize_type_argument (TREE_TYPE (val), complain);
	  tree scope = canonicalize_type_argument (TREE_OPERAND (val, 0),
						   complain);
	  val = build_qualified_name (type, scope, TREE_OPERAND (val, 1),
				      QUALIFIED_NAME_IS_TEMPLATE (val));
	}
    }

  return val;
}

/* Coerces the remaining template arguments in INNER_ARGS (from
   ARG_IDX to the end) into the parameter pack at PARM_IDX in PARMS.
   Returns the coerced argument pack.  PARM_IDX is the position of this
   parameter in the template parameter list.  ARGS is the original
   template argument list.
*/

static tree
coerce_template_parameter_pack (tree parms,
				int parm_idx,
				tree args,
				tree inner_args,
				int arg_idx,
				tree new_args,
				int* lost,
				tree in_decl,
				tsubst_flags_t complain)
{
  tree parm = TREE_VEC_ELT (parms, parm_idx);
  int nargs = inner_args ? NUM_TMPL_ARGS (inner_args) : 0;
  tree packed_args;
  tree argument_pack;
  tree packed_parms = NULL_TREE;

  if (arg_idx > nargs)
    arg_idx = nargs;

  if (tree packs = fixed_parameter_pack_p (TREE_VALUE (parm)))
    {
      /* When the template parameter is a non-type template parameter pack
	 or template template parameter pack whose type or template
	 parameters use parameter packs, we know exactly how many arguments
	 we are looking for.  Build a vector of the instantiated decls for
	 these template parameters in PACKED_PARMS.  */
      /* We can't use make_pack_expansion here because it would interpret a
	 _DECL as a use rather than a declaration.  */
      tree decl = TREE_VALUE (parm);
      tree exp = cxx_make_type (TYPE_PACK_EXPANSION);
      SET_PACK_EXPANSION_PATTERN (exp, decl);
      PACK_EXPANSION_PARAMETER_PACKS (exp) = packs;
      SET_TYPE_STRUCTURAL_EQUALITY (exp);

      /* Temporarily hide the innermost level of ARGS while substituting,
	 then restore it.  */
      TREE_VEC_LENGTH (args)--;
      packed_parms = tsubst_pack_expansion (exp, args, complain, decl);
      TREE_VEC_LENGTH (args)++;

      if (packed_parms == error_mark_node)
	return error_mark_node;

      /* If we're doing a partial instantiation of a member template,
	 verify that all of the types used for the non-type
	 template parameter pack are, in fact, valid for non-type
	 template parameters.  */
      if (arg_idx < nargs
	  && PACK_EXPANSION_P (TREE_VEC_ELT (inner_args, arg_idx)))
	{
	  int j, len = TREE_VEC_LENGTH (packed_parms);
	  for (j = 0; j < len; ++j)
	    {
	      tree t = TREE_VEC_ELT (packed_parms, j);
	      if (TREE_CODE (t) == PARM_DECL
		  && invalid_nontype_parm_type_p (TREE_TYPE (t), complain))
		return error_mark_node;
	    }
	  /* We don't know how many args we have yet, just
	     use the unconverted ones for now.  */
	  return NULL_TREE;
	}

      packed_args = make_tree_vec (TREE_VEC_LENGTH (packed_parms));
    }
  /* Check if we have a placeholder pack, which indicates we're
     in the context of a introduction list.  In that case we want
     to match this pack to the single placeholder.  */
  else if (arg_idx < nargs
	   && TREE_CODE (TREE_VEC_ELT (inner_args, arg_idx)) == WILDCARD_DECL
	   && WILDCARD_PACK_P (TREE_VEC_ELT (inner_args, arg_idx)))
    {
      nargs = arg_idx + 1;
      packed_args = make_tree_vec (1);
    }
  else
    packed_args = make_tree_vec (nargs - arg_idx);

  /* Convert the remaining arguments, which will be a part of the
     parameter pack "parm".  */
  int first_pack_arg = arg_idx;
  for (; arg_idx < nargs; ++arg_idx)
    {
      tree arg = TREE_VEC_ELT (inner_args, arg_idx);
      tree actual_parm = TREE_VALUE (parm);
      int pack_idx = arg_idx - first_pack_arg;

      if (packed_parms)
	{
	  /* Once we've packed as many args as we have types, stop.  */
	  if (pack_idx >= TREE_VEC_LENGTH (packed_parms))
	    break;
	  else if (PACK_EXPANSION_P (arg))
	    /* We don't know how many args we have yet, just
	       use the unconverted ones for now.  */
	    return NULL_TREE;
	  else
	    actual_parm = TREE_VEC_ELT (packed_parms, pack_idx);
	}

      if (arg == error_mark_node)
	{
	  if (complain & tf_error)
	    error ("template argument %d is invalid", arg_idx + 1);
	}
      else
	arg = convert_template_argument (actual_parm,
					 arg, new_args, complain, parm_idx,
					 in_decl);
      if (arg == error_mark_node)
	(*lost)++;
      TREE_VEC_ELT (packed_args, pack_idx) = arg;
    }

  if (arg_idx - first_pack_arg < TREE_VEC_LENGTH (packed_args)
      && TREE_VEC_LENGTH (packed_args) > 0)
    {
      if (complain & tf_error)
	error ("wrong number of template arguments (%d, should be %d)",
	       arg_idx - first_pack_arg, TREE_VEC_LENGTH (packed_args));
      return error_mark_node;
    }

  if (TREE_CODE (TREE_VALUE (parm)) == TYPE_DECL
      || TREE_CODE (TREE_VALUE (parm)) == TEMPLATE_DECL)
    argument_pack = cxx_make_type (TYPE_ARGUMENT_PACK);
  else
    {
      argument_pack = make_node (NONTYPE_ARGUMENT_PACK);
      TREE_CONSTANT (argument_pack) = 1;
    }

  SET_ARGUMENT_PACK_ARGS (argument_pack, packed_args);
  if (CHECKING_P)
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (packed_args,
					 TREE_VEC_LENGTH (packed_args));
  return argument_pack;
}

/* Returns the number of pack expansions in the template argument vector
   ARGS.  */

static int
pack_expansion_args_count (tree args)
{
  int i;
  int count = 0;

  if (args)
    for (i = 0; i < TREE_VEC_LENGTH (args); ++i)
      {
	tree elt = TREE_VEC_ELT (args, i);
	if (elt && PACK_EXPANSION_P (elt))
	  ++count;
      }

  return count;
}

/* Convert all template arguments to their appropriate types, and
   return a vector containing the innermost resulting template
   arguments.  If any error occurs, return error_mark_node.  Error and
   warning messages are issued under control of COMPLAIN.

   If REQUIRE_ALL_ARGS is false, argument deduction will be performed
   for arguments not specified in ARGS.  Otherwise, if
   USE_DEFAULT_ARGS is true, default arguments will be used to fill in
   unspecified arguments.  If REQUIRE_ALL_ARGS is true, but
   USE_DEFAULT_ARGS is false, then all arguments must be specified in
   ARGS.
*/ static tree coerce_template_parms (tree parms, tree args, tree in_decl, tsubst_flags_t complain, bool require_all_args, bool use_default_args) { int nparms, nargs, parm_idx, arg_idx, lost = 0; tree orig_inner_args; tree inner_args; tree new_args; tree new_inner_args; /* When used as a boolean value, indicates whether this is a variadic template parameter list. Since it's an int, we can also subtract it from nparms to get the number of non-variadic parameters. */ int variadic_p = 0; int variadic_args_p = 0; int post_variadic_parms = 0; /* Adjustment to nparms for fixed parameter packs. */ int fixed_pack_adjust = 0; int fixed_packs = 0; int missing = 0; /* Likewise for parameters with default arguments. */ int default_p = 0; if (args == error_mark_node) return error_mark_node; nparms = TREE_VEC_LENGTH (parms); /* Determine if there are any parameter packs or default arguments. */ for (parm_idx = 0; parm_idx < nparms; ++parm_idx) { tree parm = TREE_VEC_ELT (parms, parm_idx); if (variadic_p) ++post_variadic_parms; if (template_parameter_pack_p (TREE_VALUE (parm))) ++variadic_p; if (TREE_PURPOSE (parm)) ++default_p; } inner_args = orig_inner_args = INNERMOST_TEMPLATE_ARGS (args); /* If there are no parameters that follow a parameter pack, we need to expand any argument packs so that we can deduce a parameter pack from some non-packed args followed by an argument pack, as in variadic85.C. If there are such parameters, we need to leave argument packs intact so the arguments are assigned properly. This can happen when dealing with a nested class inside a partial specialization of a class template, as in variadic92.C, or when deducing a template parameter pack from a sub-declarator, as in variadic114.C. */ if (!post_variadic_parms) inner_args = expand_template_argument_pack (inner_args); /* Count any pack expansion args. */ variadic_args_p = pack_expansion_args_count (inner_args); nargs = inner_args ? 
NUM_TMPL_ARGS (inner_args) : 0; if ((nargs - variadic_args_p > nparms && !variadic_p) || (nargs < nparms - variadic_p && require_all_args && !variadic_args_p && (!use_default_args || (TREE_VEC_ELT (parms, nargs) != error_mark_node && !TREE_PURPOSE (TREE_VEC_ELT (parms, nargs)))))) { bad_nargs: if (complain & tf_error) { if (variadic_p || default_p) { nparms -= variadic_p + default_p; error ("wrong number of template arguments " "(%d, should be at least %d)", nargs, nparms); } else error ("wrong number of template arguments " "(%d, should be %d)", nargs, nparms); if (in_decl) inform (DECL_SOURCE_LOCATION (in_decl), "provided for %qD", in_decl); } return error_mark_node; } /* We can't pass a pack expansion to a non-pack parameter of an alias template (DR 1430). */ else if (in_decl && (DECL_ALIAS_TEMPLATE_P (in_decl) || concept_definition_p (in_decl)) && variadic_args_p && nargs - variadic_args_p < nparms - variadic_p) { if (complain & tf_error) { for (int i = 0; i < TREE_VEC_LENGTH (inner_args); ++i) { tree arg = TREE_VEC_ELT (inner_args, i); tree parm = TREE_VALUE (TREE_VEC_ELT (parms, i)); if (PACK_EXPANSION_P (arg) && !template_parameter_pack_p (parm)) { if (DECL_ALIAS_TEMPLATE_P (in_decl)) error_at (location_of (arg), "pack expansion argument for non-pack parameter " "%qD of alias template %qD", parm, in_decl); else error_at (location_of (arg), "pack expansion argument for non-pack parameter " "%qD of concept %qD", parm, in_decl); inform (DECL_SOURCE_LOCATION (parm), "declared here"); goto found; } } gcc_unreachable (); found:; } return error_mark_node; } /* We need to evaluate the template arguments, even though this template-id may be nested within a "sizeof". */ cp_evaluated ev; new_inner_args = make_tree_vec (nparms); new_args = add_outermost_template_args (args, new_inner_args); int pack_adjust = 0; for (parm_idx = 0, arg_idx = 0; parm_idx < nparms; parm_idx++, arg_idx++) { tree arg; tree parm; /* Get the Ith template parameter. 
*/ parm = TREE_VEC_ELT (parms, parm_idx); if (parm == error_mark_node) { TREE_VEC_ELT (new_inner_args, arg_idx) = error_mark_node; continue; } /* Calculate the next argument. */ if (arg_idx < nargs) arg = TREE_VEC_ELT (inner_args, arg_idx); else arg = NULL_TREE; if (template_parameter_pack_p (TREE_VALUE (parm)) && (arg || require_all_args || !(complain & tf_partial)) && !(arg && ARGUMENT_PACK_P (arg))) { /* Some arguments will be placed in the template parameter pack PARM. */ arg = coerce_template_parameter_pack (parms, parm_idx, args, inner_args, arg_idx, new_args, &lost, in_decl, complain); if (arg == NULL_TREE) { /* We don't know how many args we have yet, just use the unconverted (and still packed) ones for now. */ new_inner_args = orig_inner_args; arg_idx = nargs; break; } TREE_VEC_ELT (new_inner_args, parm_idx) = arg; /* Store this argument. */ if (arg == error_mark_node) { lost++; /* We are done with all of the arguments. */ arg_idx = nargs; break; } else { pack_adjust = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg)) - 1; arg_idx += pack_adjust; if (fixed_parameter_pack_p (TREE_VALUE (parm))) { ++fixed_packs; fixed_pack_adjust += pack_adjust; } } continue; } else if (arg) { if (PACK_EXPANSION_P (arg)) { /* "If every valid specialization of a variadic template requires an empty template parameter pack, the template is ill-formed, no diagnostic required." So check that the pattern works with this parameter. */ tree pattern = PACK_EXPANSION_PATTERN (arg); tree conv = convert_template_argument (TREE_VALUE (parm), pattern, new_args, complain, parm_idx, in_decl); if (conv == error_mark_node) { if (complain & tf_error) inform (input_location, "so any instantiation with a " "non-empty parameter pack would be ill-formed"); ++lost; } else if (TYPE_P (conv) && !TYPE_P (pattern)) /* Recover from missing typename. */ TREE_VEC_ELT (inner_args, arg_idx) = make_pack_expansion (conv, complain); /* We don't know how many args we have yet, just use the unconverted ones for now. 
*/ new_inner_args = inner_args; arg_idx = nargs; break; } } else if (require_all_args) { /* There must be a default arg in this case. */ arg = tsubst_template_arg (TREE_PURPOSE (parm), new_args, complain, in_decl); /* The position of the first default template argument, is also the number of non-defaulted arguments in NEW_INNER_ARGS. Record that. */ if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args)) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args, arg_idx - pack_adjust); } else break; if (arg == error_mark_node) { if (complain & tf_error) error ("template argument %d is invalid", arg_idx + 1); } else if (!arg) { /* This can occur if there was an error in the template parameter list itself (which we would already have reported) that we are trying to recover from, e.g., a class template with a parameter list such as template<typename..., typename> (cpp0x/variadic150.C). */ ++lost; /* This can also happen with a fixed parameter pack (71834). */ if (arg_idx >= nargs) ++missing; } else arg = convert_template_argument (TREE_VALUE (parm), arg, new_args, complain, parm_idx, in_decl); if (arg == error_mark_node) lost++; TREE_VEC_ELT (new_inner_args, arg_idx - pack_adjust) = arg; } if (missing || arg_idx < nargs - variadic_args_p) { /* If we had fixed parameter packs, we didn't know how many arguments we actually needed earlier; now we do. */ nparms += fixed_pack_adjust; variadic_p -= fixed_packs; goto bad_nargs; } if (arg_idx < nargs) { /* We had some pack expansion arguments that will only work if the packs are empty, but wait until instantiation time to complain. See variadic-ttp3.C. */ /* Except that we can't provide empty packs to alias templates or concepts when there are no corresponding parameters. Basically, we can get here with this: template<typename T> concept C = true; template<typename... Args> requires C<Args...> void f(); When parsing C<Args...>, we try to form a concept check of C<?, Args...>. 
Without the extra check for substituting an empty pack past the last
   parameter, we can accept the check as valid.

   FIXME: This may be valid for alias templates (but I doubt it).

   FIXME: The error could be better also.  */
      if (in_decl && concept_definition_p (in_decl))
	{
	  if (complain & tf_error)
	    error_at (location_of (TREE_VEC_ELT (args, arg_idx)),
		      "too many arguments");
	  return error_mark_node;
	}

      /* Splice the already-converted prefix of NEW_INNER_ARGS together
	 with the leftover (still unconverted) trailing arguments from
	 INNER_ARGS.  */
      int len = nparms + (nargs - arg_idx);
      tree args = make_tree_vec (len);
      int i = 0;
      for (; i < nparms; ++i)
	TREE_VEC_ELT (args, i) = TREE_VEC_ELT (new_inner_args, i);
      for (; i < len; ++i, ++arg_idx)
	TREE_VEC_ELT (args, i) = TREE_VEC_ELT (inner_args,
					       arg_idx - pack_adjust);
      new_inner_args = args;
    }

  if (lost)
    {
      /* If diagnostics were requested, one must already have been
	 emitted above.  */
      gcc_assert (!(complain & tf_error) || seen_error ());
      return error_mark_node;
    }

  /* If the count was never set above, no default argument was used;
     record that every argument in the vector was explicitly given.  */
  if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args))
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_inner_args,
					 TREE_VEC_LENGTH (new_inner_args));

  return new_inner_args;
}

/* Convert all template arguments to their appropriate types, and
   return a vector containing the innermost resulting template
   arguments.  If any error occurs, return error_mark_node.  Error and
   warning messages are not issued.

   Note that no function argument deduction is performed, and default
   arguments are used to fill in unspecified arguments.  */

/* Overload: coerce silently (tf_none), requiring all arguments and
   using default arguments.  */
tree
coerce_template_parms (tree parms, tree args, tree in_decl)
{
  return coerce_template_parms (parms, args, in_decl, tf_none, true, true);
}

/* Convert all template arguments to their appropriate type, and
   instantiate default arguments as needed.  This returns a vector
   containing the innermost resulting template arguments, or
   error_mark_node if unsuccessful.  */

/* Overload: like the above, but diagnose under control of COMPLAIN.  */
tree
coerce_template_parms (tree parms, tree args, tree in_decl,
		       tsubst_flags_t complain)
{
  return coerce_template_parms (parms, args, in_decl, complain, true, true);
}

/* Like coerce_template_parms.
If PARMS represents all template parameters levels, this function
   returns a vector of vectors representing all the resulting argument
   levels.  Note that in this case, only the innermost arguments are
   coerced because the outermost ones are supposed to have been coerced
   already.

   Otherwise, if PARMS represents only (the innermost) vector of
   parameters, this function returns a vector containing just the
   innermost resulting arguments.  */

static tree
coerce_innermost_template_parms (tree parms, tree args, tree in_decl,
				 tsubst_flags_t complain,
				 bool require_all_args,
				 bool use_default_args)
{
  int parms_depth = TMPL_PARMS_DEPTH (parms);
  int args_depth = TMPL_ARGS_DEPTH (args);

  /* A single parameter level: coerce just the innermost vector of
     arguments against it.  */
  if (parms_depth <= 1)
    return coerce_template_parms (INNERMOST_TEMPLATE_PARMS (parms),
				  args, in_decl, complain,
				  require_all_args, use_default_args);

  /* Multiple parameter levels: build one argument vector per level.
     Only the level matching ARGS_DEPTH is coerced; the other levels
     are copied through from ARGS unchanged (they are supposed to have
     been coerced already).  */
  tree result = make_tree_vec (parms_depth);
  int depth = parms_depth;
  for (tree level = parms;
       parms_depth > 0 && level != NULL_TREE;
       level = TREE_CHAIN (level), --depth)
    {
      tree coerced;
      if (depth == args_depth)
	coerced = coerce_template_parms (TREE_VALUE (level), args, in_decl,
					 complain, require_all_args,
					 use_default_args);
      else
	coerced = TMPL_ARGS_LEVEL (args, depth);

      if (coerced == error_mark_node)
	return error_mark_node;

      SET_TMPL_ARGS_LEVEL (result, depth, coerced);
    }

  return result;
}

/* Returns true if T is a wrapper to make a C++20 template parameter
   object const.  */

static bool
class_nttp_const_wrapper_p (tree t)
{
  if (cxx_dialect < cxx20)
    return false;
  if (TREE_CODE (t) != VIEW_CONVERT_EXPR)
    return false;
  if (!CP_TYPE_CONST_P (TREE_TYPE (t)))
    return false;
  return TREE_CODE (TREE_OPERAND (t, 0)) == TEMPLATE_PARM_INDEX;
}

/* Returns 1 if template args OT and NT are equivalent.
*/

int
template_args_equal (tree ot, tree nt, bool partial_order /* = false */)
{
  if (nt == ot)
    return 1;
  if (nt == NULL_TREE || ot == NULL_TREE)
    return false;
  /* any_targ_node is a wildcard: it compares equal to anything.  */
  if (nt == any_targ_node || ot == any_targ_node)
    return true;

  /* Look through the const wrappers put around C++20 class NTTP
     objects before comparing.  */
  if (class_nttp_const_wrapper_p (nt))
    nt = TREE_OPERAND (nt, 0);
  if (class_nttp_const_wrapper_p (ot))
    ot = TREE_OPERAND (ot, 0);

  if (TREE_CODE (nt) == TREE_VEC || TREE_CODE (ot) == TREE_VEC)
    /* For member templates */
    return TREE_CODE (ot) == TREE_CODE (nt) && comp_template_args (ot, nt);
  else if (PACK_EXPANSION_P (ot) || PACK_EXPANSION_P (nt))
    return (PACK_EXPANSION_P (ot) && PACK_EXPANSION_P (nt)
	    && template_args_equal (PACK_EXPANSION_PATTERN (ot),
				    PACK_EXPANSION_PATTERN (nt))
	    && template_args_equal (PACK_EXPANSION_EXTRA_ARGS (ot),
				    PACK_EXPANSION_EXTRA_ARGS (nt)));
  else if (ARGUMENT_PACK_P (ot) || ARGUMENT_PACK_P (nt))
    return cp_tree_equal (ot, nt);
  else if (TREE_CODE (ot) == ARGUMENT_PACK_SELECT)
    gcc_unreachable ();
  else if (TYPE_P (nt) || TYPE_P (ot))
    {
      if (!(TYPE_P (nt) && TYPE_P (ot)))
	return false;
      /* Don't treat an alias template specialization with dependent
	 arguments as equivalent to its underlying type when used as a
	 template argument; we need them to be distinct so that we
	 substitute into the specialization arguments at instantiation
	 time.  And aliases can't be equivalent without being ==, so
	 we don't need to look any deeper.

	 During partial ordering, however, we need to treat them normally so
	 that we can order uses of the same alias with different
	 cv-qualification (79960).  */
      if (!partial_order
	  && (TYPE_ALIAS_P (nt) || TYPE_ALIAS_P (ot)))
	return false;
      else
	return same_type_p (ot, nt);
    }
  else
    {
      /* Try to treat a template non-type argument that has been converted
	 to the parameter type as equivalent to one that hasn't yet.  */
      for (enum tree_code code1 = TREE_CODE (ot);
	   CONVERT_EXPR_CODE_P (code1) || code1 == NON_LVALUE_EXPR;
	   code1 = TREE_CODE (ot))
	ot = TREE_OPERAND (ot, 0);

      for (enum tree_code code2 = TREE_CODE (nt);
	   CONVERT_EXPR_CODE_P (code2) || code2 == NON_LVALUE_EXPR;
	   code2 = TREE_CODE (nt))
	nt = TREE_OPERAND (nt, 0);

      return cp_tree_equal (ot, nt);
    }
}

/* Returns 1 iff the OLDARGS and NEWARGS are in fact identical sets of
   template arguments.  Returns 0 otherwise, and updates OLDARG_PTR and
   NEWARG_PTR with the offending arguments if they are non-NULL.  */

int
comp_template_args (tree oldargs, tree newargs,
		    tree *oldarg_ptr, tree *newarg_ptr,
		    bool partial_order)
{
  int i;

  if (oldargs == newargs)
    return 1;

  if (!oldargs || !newargs)
    return 0;

  if (TREE_VEC_LENGTH (oldargs) != TREE_VEC_LENGTH (newargs))
    return 0;

  for (i = 0; i < TREE_VEC_LENGTH (oldargs); ++i)
    {
      tree nt = TREE_VEC_ELT (newargs, i);
      tree ot = TREE_VEC_ELT (oldargs, i);

      if (! template_args_equal (ot, nt, partial_order))
	{
	  /* Report the first mismatching pair back to the caller, if
	     requested.  */
	  if (oldarg_ptr != NULL)
	    *oldarg_ptr = ot;
	  if (newarg_ptr != NULL)
	    *newarg_ptr = nt;
	  return 0;
	}
    }
  return 1;
}

/* Convenience wrapper: compare argument vectors using the
   partial-ordering rules of template_args_equal.  */

inline bool
comp_template_args_porder (tree oargs, tree nargs)
{
  return comp_template_args (oargs, nargs, NULL, NULL, true);
}

/* Implement a freelist interface for objects of type T.

   Head is a separate object, rather than a regular member, so that we
   can define it as a GTY deletable pointer, which is highly
   desirable.

   A data member could be declared that way, but then the containing
   object would implicitly get GTY((user)), which would prevent us
   from instantiating freelists as global objects.  Although this way
   we can create freelist global objects, they're such thin wrappers
   that instantiating temporaries at every use loses nothing and saves
   permanent storage for the freelist object.

   Member functions next, anew, poison and reinit have default
   implementations that work for most of the types we're interested
   in, but if they don't work for some type, they should be explicitly
   specialized.
See the comments before them for requirements, and
   the example specializations for the tree_list_freelist.  */
template <typename T>
class freelist
{
  /* Return the next object in a chain.  We could just do type
     punning, but if we access the object with its underlying type, we
     avoid strict-aliasing trouble.  This needs only work between
     poison and reinit.  */
  static T *&next (T *obj) { return obj->next; }

  /* Return a newly allocated, uninitialized or minimally-initialized
     object of type T.  Any initialization performed by anew should
     either remain across the life of the object and the execution of
     poison, or be redone by reinit.  */
  static T *anew () { return ggc_alloc<T> (); }

  /* Optionally scribble all over the bits holding the object, so that
     they become (mostly?) uninitialized memory.  This is called while
     preparing to make the object part of the free list.  */
  static void poison (T *obj)
  {
    T *p ATTRIBUTE_UNUSED = obj;
    T **q ATTRIBUTE_UNUSED = &next (obj);

#ifdef ENABLE_GC_CHECKING
    /* Poison the data, to indicate the data is garbage.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, sizeof (*p)));
    memset (p, 0xa5, sizeof (*p));
#endif
    /* Let valgrind know the object is free.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, sizeof (*p)));

    /* Let valgrind know the next portion of the object is available,
       but uninitialized.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (q, sizeof (*q)));
  }

  /* Bring an object that underwent at least one lifecycle after anew
     and before the most recent free and poison, back to a usable
     state, reinitializing whatever is needed for it to be
     functionally equivalent to an object just allocated and returned
     by anew.  This may poison or clear the next field, used by
     freelist housekeeping after poison was called.  */
  static void reinit (T *obj)
  {
    T **q ATTRIBUTE_UNUSED = &next (obj);

#ifdef ENABLE_GC_CHECKING
    memset (q, 0xa5, sizeof (*q));
#endif
    /* Let valgrind know the entire object is available, but
       uninitialized.  */
    VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (*obj)));
  }

  /* Reference a GTY-deletable pointer that points to the first object
     in the free list proper.  */
  T *&head;

public:
  /* Construct a freelist object chaining objects off of HEAD.  */
  freelist (T *&head) : head(head) {}

  /* Add OBJ to the free object list.  The former head becomes OBJ's
     successor.  */
  void free (T *obj)
  {
    poison (obj);
    next (obj) = head;
    head = obj;
  }

  /* Take an object from the free list, if one is available, or
     allocate a new one.  Objects taken from the free list should be
     regarded as filled with garbage, except for bits that are
     configured to be preserved across free and alloc.  */
  T *alloc ()
  {
    if (head)
      {
	T *obj = head;
	head = next (head);
	reinit (obj);
	return obj;
      }
    else
      return anew ();
  }
};

/* Explicitly specialize the interfaces for freelist<tree_node>: we
   want to allocate a TREE_LIST using the usual interface, and ensure
   TREE_CHAIN remains functional.  Alas, we have to duplicate a bit of
   build_tree_list logic in reinit, so this could go out of sync.  */
template <>
inline tree &
freelist<tree_node>::next (tree obj)
{
  return TREE_CHAIN (obj);
}

template <>
inline tree
freelist<tree_node>::anew ()
{
  return build_tree_list (NULL, NULL);
}

template <>
inline void
freelist<tree_node>::poison (tree obj ATTRIBUTE_UNUSED)
{
  /* NB: SIZE is sizeof (tree_list), not sizeof (*obj): only TREE_LIST
     nodes are ever put on this freelist (asserted below).  */
  int size ATTRIBUTE_UNUSED = sizeof (tree_list);
  tree p ATTRIBUTE_UNUSED = obj;
  tree_base *b ATTRIBUTE_UNUSED = &obj->base;
  tree *q ATTRIBUTE_UNUSED = &next (obj);

#ifdef ENABLE_GC_CHECKING
  gcc_checking_assert (TREE_CODE (obj) == TREE_LIST);

  /* Poison the data, to indicate the data is garbage.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
  memset (p, 0xa5, size);
#endif
  /* Let valgrind know the object is free.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));

  /* But we still want to use the TREE_CODE and TREE_CHAIN parts.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b)));
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (q, sizeof (*q)));

#ifdef ENABLE_GC_CHECKING
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (b, sizeof (*b)));
  /* Keep TREE_CHAIN functional.  */
  TREE_SET_CODE (obj, TREE_LIST);
#else
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b)));
#endif
}

template <>
inline void
freelist<tree_node>::reinit (tree obj ATTRIBUTE_UNUSED)
{
  tree_base *b ATTRIBUTE_UNUSED = &obj->base;

#ifdef ENABLE_GC_CHECKING
  gcc_checking_assert (TREE_CODE (obj) == TREE_LIST);
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (tree_list)));
  memset (obj, 0, sizeof (tree_list));
#endif

  /* Let valgrind know the entire object is available, but
     uninitialized.  */
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (obj, sizeof (tree_list)));

#ifdef ENABLE_GC_CHECKING
  TREE_SET_CODE (obj, TREE_LIST);
#else
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (b, sizeof (*b)));
#endif
}

/* Point to the first object in the TREE_LIST freelist.  */
static GTY((deletable)) tree tree_list_freelist_head;
/* Return the/an actual TREE_LIST freelist.  */
static inline freelist<tree_node>
tree_list_freelist ()
{
  return tree_list_freelist_head;
}

/* Point to the first object in the tinst_level freelist.  */
static GTY((deletable)) tinst_level *tinst_level_freelist_head;
/* Return the/an actual tinst_level freelist.  */
static inline freelist<tinst_level>
tinst_level_freelist ()
{
  return tinst_level_freelist_head;
}

/* Point to the first object in the pending_template freelist.  */
static GTY((deletable)) pending_template *pending_template_freelist_head;
/* Return the/an actual pending_template freelist.  */
static inline freelist<pending_template>
pending_template_freelist ()
{
  return pending_template_freelist_head;
}

/* Build the TREE_LIST object out of a split list, store it
   permanently, and return it.
*/
tree
tinst_level::to_list ()
{
  gcc_assert (split_list_p ());
  tree ret = tree_list_freelist ().alloc ();
  TREE_PURPOSE (ret) = tldcl;
  TREE_VALUE (ret) = targs;
  tldcl = ret;
  targs = NULL;
  gcc_assert (tree_list_p ());
  return ret;
}

const unsigned short tinst_level::refcount_infinity;

/* Increment OBJ's refcount unless it is already infinite.  */
static tinst_level *
inc_refcount_use (tinst_level *obj)
{
  if (obj && obj->refcount != tinst_level::refcount_infinity)
    ++obj->refcount;
  return obj;
}

/* Release storage for OBJ and its node, if it's a TREE_LIST.  */
void
tinst_level::free (tinst_level *obj)
{
  if (obj->tree_list_p ())
    tree_list_freelist ().free (obj->get_node ());
  tinst_level_freelist ().free (obj);
}

/* Decrement OBJ's refcount if not infinite.  If it reaches zero,
   release OBJ's DECL and OBJ, and start over with the tinst_level
   object that used to be referenced by OBJ's NEXT.  */
static void
dec_refcount_use (tinst_level *obj)
{
  while (obj
	 && obj->refcount != tinst_level::refcount_infinity
	 && !--obj->refcount)
    {
      tinst_level *next = obj->next;
      tinst_level::free (obj);
      obj = next;
    }
}

/* Modify PTR so that it points to OBJ, adjusting the refcounts of OBJ
   and of the former PTR.  Omitting the second argument is equivalent
   to passing (T*)NULL; this is allowed because passing the
   zero-valued integral constant NULL confuses type deduction and/or
   overload resolution.  */
template <typename T>
static void
set_refcount_ptr (T *& ptr, T *obj = NULL)
{
  /* Increment before decrementing, in case PTR and OBJ alias.  */
  T *save = ptr;
  ptr = inc_refcount_use (obj);
  dec_refcount_use (save);
}

/* Add D (a class type or a decl with template info) to the list of
   pending template instantiations, recording the current
   instantiation context in the new entry.  A template already flagged
   as pending is not added twice.  */
static void
add_pending_template (tree d)
{
  tree ti = (TYPE_P (d)
	     ? CLASSTYPE_TEMPLATE_INFO (d)
	     : DECL_TEMPLATE_INFO (d));
  struct pending_template *pt;
  int level;

  if (TI_PENDING_TEMPLATE_FLAG (ti))
    return;

  /* We are called both from instantiate_decl, where we've already had a
     tinst_level pushed, and instantiate_template, where we haven't.
     Compensate.  */
  gcc_assert (TREE_CODE (d) != TREE_LIST);
  level = !current_tinst_level
    || current_tinst_level->maybe_get_node () != d;

  if (level)
    push_tinst_level (d);

  pt = pending_template_freelist ().alloc ();
  pt->next = NULL;
  pt->tinst = NULL;
  set_refcount_ptr (pt->tinst, current_tinst_level);
  if (last_pending_template)
    last_pending_template->next = pt;
  else
    pending_templates = pt;

  last_pending_template = pt;

  TI_PENDING_TEMPLATE_FLAG (ti) = 1;

  if (level)
    pop_tinst_level ();
}

/* Return a TEMPLATE_ID_EXPR corresponding to the indicated FNS and
   ARGLIST.  Valid choices for FNS are given in the cp-tree.def
   documentation for TEMPLATE_ID_EXPR.  */

tree
lookup_template_function (tree fns, tree arglist)
{
  if (fns == error_mark_node || arglist == error_mark_node)
    return error_mark_node;

  gcc_assert (!arglist || TREE_CODE (arglist) == TREE_VEC);

  if (!is_overloaded_fn (fns) && !identifier_p (fns))
    {
      error ("%q#D is not a function template", fns);
      return error_mark_node;
    }

  if (BASELINK_P (fns))
    {
      /* Rewrite the functions in place, preserving the BASELINK
	 wrapper.  */
      BASELINK_FUNCTIONS (fns) = build2 (TEMPLATE_ID_EXPR,
					 unknown_type_node,
					 BASELINK_FUNCTIONS (fns),
					 arglist);
      return fns;
    }

  return build2 (TEMPLATE_ID_EXPR, unknown_type_node, fns, arglist);
}

/* Within the scope of a template class S<T>, the name S gets bound
   (in build_self_reference) to a TYPE_DECL for the class, not a
   TEMPLATE_DECL.  If DECL is a TYPE_DECL for current_class_type, or
   one of its enclosing classes, and that type is a template,
   return the associated TEMPLATE_DECL.  Otherwise, the original
   DECL is returned.

   Also handle the case when DECL is a TREE_LIST of ambiguous
   injected-class-names from different bases.  */

tree
maybe_get_template_decl_from_type_decl (tree decl)
{
  if (decl == NULL_TREE)
    return decl;

  /* DR 176: A lookup that finds an injected-class-name (10.2
     [class.member.lookup]) can result in an ambiguity in certain cases
     (for example, if it is found in more than one base class).  If all
     of the injected-class-names that are found refer to specializations
     of the same class template, and if the name is followed by a
     template-argument-list, the reference refers to the class template
     itself and not a specialization thereof, and is not ambiguous.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      tree t, tmpl = NULL_TREE;
      for (t = decl; t; t = TREE_CHAIN (t))
	{
	  tree elt = maybe_get_template_decl_from_type_decl (TREE_VALUE (t));
	  if (!tmpl)
	    tmpl = elt;
	  else if (tmpl != elt)
	    break;
	}
      /* T is NULL_TREE only if every element resolved to the same
	 template; otherwise the lookup stays ambiguous.  */
      if (tmpl && t == NULL_TREE)
	return tmpl;
      else
	return decl;
    }

  return (decl != NULL_TREE
	  && DECL_SELF_REFERENCE_P (decl)
	  && CLASSTYPE_TEMPLATE_INFO (TREE_TYPE (decl)))
    ? CLASSTYPE_TI_TEMPLATE (TREE_TYPE (decl)) : decl;
}

/* Given an IDENTIFIER_NODE (or type TEMPLATE_DECL) and a chain of
   parameters, find the desired type.

   D1 is the PTYPENAME terminal, and ARGLIST is the list of arguments.

   IN_DECL, if non-NULL, is the template declaration we are trying to
   instantiate.

   If ENTERING_SCOPE is nonzero, we are about to enter the scope of
   the class we are looking up.

   Issue error and warning messages under control of COMPLAIN.

   If the template class is really a local class in a template
   function, then the FUNCTION_CONTEXT is the function in which it is
   being instantiated.

   ??? Note that this function is currently called *twice* for each
   template-id: the first time from the parser, while creating the
   incomplete type (finish_template_type), and the second type during the
   real instantiation (instantiate_template_class).  This is surely something
   that we want to avoid.  It also causes some problems with argument
   coercion (see convert_nontype_argument for more information on this).
*/
static tree
lookup_template_class_1 (tree d1, tree arglist, tree in_decl, tree context,
			 int entering_scope, tsubst_flags_t complain)
{
  tree templ = NULL_TREE, parmlist;
  tree t;
  spec_entry **slot;
  spec_entry *entry;
  spec_entry elt;
  hashval_t hash;

  /* First, resolve D1 to a TEMPLATE_DECL, depending on what kind of
     node we were handed.  */
  if (identifier_p (d1))
    {
      tree value = innermost_non_namespace_value (d1);
      if (value && DECL_TEMPLATE_TEMPLATE_PARM_P (value))
	templ = value;
      else
	{
	  if (context)
	    push_decl_namespace (context);
	  templ = lookup_name (d1);
	  templ = maybe_get_template_decl_from_type_decl (templ);
	  if (context)
	    pop_decl_namespace ();
	}
      if (templ)
	context = DECL_CONTEXT (templ);
    }
  else if (TREE_CODE (d1) == TYPE_DECL && MAYBE_CLASS_TYPE_P (TREE_TYPE (d1)))
    {
      tree type = TREE_TYPE (d1);

      /* If we are declaring a constructor, say A<T>::A<T>, we will get
	 an implicit typename for the second A.  Deal with it.  */
      if (TREE_CODE (type) == TYPENAME_TYPE && TREE_TYPE (type))
	type = TREE_TYPE (type);

      if (CLASSTYPE_TEMPLATE_INFO (type))
	{
	  templ = CLASSTYPE_TI_TEMPLATE (type);
	  d1 = DECL_NAME (templ);
	}
    }
  else if (TREE_CODE (d1) == ENUMERAL_TYPE
	   || (TYPE_P (d1) && MAYBE_CLASS_TYPE_P (d1)))
    {
      templ = TYPE_TI_TEMPLATE (d1);
      d1 = DECL_NAME (templ);
    }
  else if (DECL_TYPE_TEMPLATE_P (d1))
    {
      templ = d1;
      d1 = DECL_NAME (templ);
      context = DECL_CONTEXT (templ);
    }
  else if (DECL_TEMPLATE_TEMPLATE_PARM_P (d1))
    {
      templ = d1;
      d1 = DECL_NAME (templ);
    }

  /* Issue an error message if we didn't find a template.  */
  if (! templ)
    {
      if (complain & tf_error)
	error ("%qT is not a template", d1);
      return error_mark_node;
    }

  if (TREE_CODE (templ) != TEMPLATE_DECL
      /* Make sure it's a user visible template, if it was named by
	 the user.  */
      || ((complain & tf_user) && !DECL_TEMPLATE_PARM_P (templ)
	  && !PRIMARY_TEMPLATE_P (templ)))
    {
      if (complain & tf_error)
	{
	  error ("non-template type %qT used as a template", d1);
	  if (in_decl)
	    error ("for template declaration %q+D", in_decl);
	}
      return error_mark_node;
    }

  /* tf_user is only consumed by the check above.  */
  complain &= ~tf_user;

  /* An alias that just changes the name of a template is equivalent to the
     other template, so if any of the arguments are pack expansions, strip
     the alias to avoid problems with a pack expansion passed to a non-pack
     alias template parameter (DR 1430).  */
  if (pack_expansion_args_count (INNERMOST_TEMPLATE_ARGS (arglist)))
    templ = get_underlying_template (templ);

  if (DECL_TEMPLATE_TEMPLATE_PARM_P (templ))
    {
      /* Template template parameter: bind the arguments rather than
	 instantiating anything.  */
      tree parm;
      tree arglist2 = coerce_template_args_for_ttp (templ, arglist, complain);
      if (arglist2 == error_mark_node
	  || (!uses_template_parms (arglist2)
	      && check_instantiated_args (templ, arglist2, complain)))
	return error_mark_node;

      parm = bind_template_template_parm (TREE_TYPE (templ), arglist2);
      return parm;
    }
  else
    {
      tree template_type = TREE_TYPE (templ);
      tree gen_tmpl;
      tree type_decl;
      tree found = NULL_TREE;
      int arg_depth;
      int parm_depth;
      int is_dependent_type;
      int use_partial_inst_tmpl = false;

      if (template_type == error_mark_node)
	/* An error occurred while building the template TEMPL, and a
	   diagnostic has most certainly been emitted for that
	   already.  Let's propagate that error.  */
	return error_mark_node;

      gen_tmpl = most_general_template (templ);
      parmlist = DECL_TEMPLATE_PARMS (gen_tmpl);
      parm_depth = TMPL_PARMS_DEPTH (parmlist);
      arg_depth = TMPL_ARGS_DEPTH (arglist);

      if (arg_depth == 1 && parm_depth > 1)
	{
	  /* We've been given an incomplete set of template arguments.
	     For example, given:

	       template <class T> struct S1 {
		 template <class U> struct S2 {};
		 template <class U> struct S2<U*> {};
		};

	     we will be called with an ARGLIST of `U*', but the
	     TEMPLATE will be `template <class T> template
	     <class U> struct S1<T>::S2'.  We must fill in the missing
	     arguments.  */
	  tree ti = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (templ));
	  arglist = add_outermost_template_args (TI_ARGS (ti), arglist);
	  arg_depth = TMPL_ARGS_DEPTH (arglist);
	}

      /* Now we should have enough arguments.  */
      gcc_assert (parm_depth == arg_depth);

      /* From here on, we're only interested in the most general
	 template.  */

      /* Calculate the BOUND_ARGS.  These will be the args that are
	 actually tsubst'd into the definition to create the
	 instantiation.  */
      arglist = coerce_innermost_template_parms (parmlist, arglist, gen_tmpl,
						 complain,
						 /*require_all_args=*/true,
						 /*use_default_args=*/true);

      if (arglist == error_mark_node)
	/* We were unable to bind the arguments.  */
	return error_mark_node;

      /* In the scope of a template class, explicit references to the
	 template class refer to the type of the template, not any
	 instantiation of it.  For example, in:

	  template <class T> class C { void f(C<T>); }

	 the `C<T>' is just the same as `C'.  Outside of the
	 class, however, such a reference is an instantiation.  */
      if (entering_scope
	  || !PRIMARY_TEMPLATE_P (gen_tmpl)
	  || currently_open_class (template_type))
	{
	  tree tinfo = TYPE_TEMPLATE_INFO (template_type);

	  if (tinfo && comp_template_args (TI_ARGS (tinfo), arglist))
	    return template_type;
	}

      /* If we already have this specialization, return it.  */
      elt.tmpl = gen_tmpl;
      elt.args = arglist;
      elt.spec = NULL_TREE;
      hash = spec_hasher::hash (&elt);
      entry = type_specializations->find_with_hash (&elt, hash);

      if (entry)
	return entry->spec;

      /* If the template's constraints are not satisfied,
	 then we cannot form a valid type.

	 Note that the check is deferred until after the hash
	 lookup.  This prevents redundant checks on previously
	 instantiated specializations.  */
      if (flag_concepts
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl)
	  && !constraints_satisfied_p (gen_tmpl, arglist))
	{
	  if (complain & tf_error)
	    {
	      auto_diagnostic_group d;
	      error ("template constraint failure for %qD", gen_tmpl);
	      diagnose_constraints (input_location, gen_tmpl, arglist);
	    }
	  return error_mark_node;
	}

      is_dependent_type = uses_template_parms (arglist);

      /* If the deduced arguments are invalid, then the binding
	 failed.  */
      if (!is_dependent_type
	  && check_instantiated_args (gen_tmpl,
				      INNERMOST_TEMPLATE_ARGS (arglist),
				      complain))
	return error_mark_node;

      if (!is_dependent_type
	  && !PRIMARY_TEMPLATE_P (gen_tmpl)
	  && !LAMBDA_TYPE_P (TREE_TYPE (gen_tmpl))
	  && TREE_CODE (CP_DECL_CONTEXT (gen_tmpl)) == NAMESPACE_DECL)
	/* This occurs when the user has tried to define a tagged type
	   in a scope that forbids it.  We emitted an error during the
	   parse.  We didn't complete the bail out then, so here we
	   are.  */
	return error_mark_node;

      /* Substitute into the enclosing context so the new type gets
	 the right DECL_CONTEXT.  */
      context = DECL_CONTEXT (gen_tmpl);
      if (context && TYPE_P (context))
	{
	  context = tsubst_aggr_type (context, arglist, complain, in_decl, true);
	  context = complete_type (context);
	}
      else
	context = tsubst (context, arglist, complain, in_decl);

      if (context == error_mark_node)
	return error_mark_node;

      if (!context)
	context = global_namespace;

      /* Create the type.  */
      if (DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	{
	  /* The user referred to a specialization of an alias
	     template represented by GEN_TMPL.

	     [temp.alias]/2 says:

		 When a template-id refers to the specialization of an
		 alias template, it is equivalent to the associated
		 type obtained by substitution of its
		 template-arguments for the template-parameters in the
		 type-id of the alias template.  */

	  t = tsubst (TREE_TYPE (gen_tmpl), arglist, complain, in_decl);
	  /* Note that the call above (by indirectly calling
	     register_specialization in tsubst_decl) registers the
	     TYPE_DECL representing the specialization of the alias
	     template.  So next time someone substitutes ARGLIST for
	     the template parms into the alias template (GEN_TMPL),
	     she'll get that TYPE_DECL back.  */

	  if (t == error_mark_node)
	    return t;
	}
      else if (TREE_CODE (template_type) == ENUMERAL_TYPE)
	{
	  if (!is_dependent_type)
	    {
	      set_current_access_from_decl (TYPE_NAME (template_type));
	      t = start_enum (TYPE_IDENTIFIER (template_type), NULL_TREE,
			      tsubst (ENUM_UNDERLYING_TYPE (template_type),
				      arglist, complain, in_decl),
			      tsubst_attributes (TYPE_ATTRIBUTES (template_type),
						 arglist, complain, in_decl),
			      SCOPED_ENUM_P (template_type), NULL);

	      if (t == error_mark_node)
		return t;
	    }
	  else
	    {
	      /* We don't want to call start_enum for this type, since
		 the values for the enumeration constants may involve
		 template parameters.  And, no one should be interested
		 in the enumeration constants for such a type.  */
	      t = cxx_make_type (ENUMERAL_TYPE);
	      SET_SCOPED_ENUM_P (t, SCOPED_ENUM_P (template_type));
	    }
	  SET_OPAQUE_ENUM_P (t, OPAQUE_ENUM_P (template_type));
	  ENUM_FIXED_UNDERLYING_TYPE_P (t)
	    = ENUM_FIXED_UNDERLYING_TYPE_P (template_type);
	}
      else if (CLASS_TYPE_P (template_type))
	{
	  /* Lambda closures are regenerated in tsubst_lambda_expr, not
	     instantiated here.  */
	  gcc_assert (!LAMBDA_TYPE_P (template_type));

	  t = make_class_type (TREE_CODE (template_type));
	  CLASSTYPE_DECLARED_CLASS (t)
	    = CLASSTYPE_DECLARED_CLASS (template_type);
	  SET_CLASSTYPE_IMPLICIT_INSTANTIATION (t);

	  /* A local class.  Make sure the decl gets registered properly.  */
	  if (context == current_function_decl)
	    if (pushtag (DECL_NAME (gen_tmpl), t)
		== error_mark_node)
	      return error_mark_node;

	  if (comp_template_args (CLASSTYPE_TI_ARGS (template_type), arglist))
	    /* This instantiation is another name for the primary
	       template type.  Set the TYPE_CANONICAL field
	       appropriately.  */
	    TYPE_CANONICAL (t) = template_type;
	  else if (any_template_arguments_need_structural_equality_p (arglist))
	    /* Some of the template arguments require structural
	       equality testing, so this template class requires
	       structural equality testing.  */
	    SET_TYPE_STRUCTURAL_EQUALITY (t);
	}
      else
	gcc_unreachable ();

      /* If we called start_enum or pushtag above, this information
	 will already be set up.  */
      type_decl = TYPE_NAME (t);
      if (!type_decl)
	{
	  TYPE_CONTEXT (t) = FROB_CONTEXT (context);

	  type_decl = create_implicit_typedef (DECL_NAME (gen_tmpl), t);
	  DECL_CONTEXT (type_decl) = TYPE_CONTEXT (t);
	  DECL_SOURCE_LOCATION (type_decl)
	    = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (template_type));
	}

      if (CLASS_TYPE_P (template_type))
	{
	  /* Copy access and visibility from the template's own decl.  */
	  TREE_PRIVATE (type_decl)
	    = TREE_PRIVATE (TYPE_MAIN_DECL (template_type));
	  TREE_PROTECTED (type_decl)
	    = TREE_PROTECTED (TYPE_MAIN_DECL (template_type));
	  if (CLASSTYPE_VISIBILITY_SPECIFIED (template_type))
	    {
	      DECL_VISIBILITY_SPECIFIED (type_decl) = 1;
	      DECL_VISIBILITY (type_decl) = CLASSTYPE_VISIBILITY (template_type);
	    }
	}

      if (OVERLOAD_TYPE_P (t)
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	{
	  static const char *tags[] = {"abi_tag", "may_alias"};

	  for (unsigned ix = 0; ix != 2; ix++)
	    {
	      tree attributes
		= lookup_attribute (tags[ix], TYPE_ATTRIBUTES (template_type));

	      if (attributes)
		TYPE_ATTRIBUTES (t)
		  = tree_cons (TREE_PURPOSE (attributes),
			       TREE_VALUE (attributes),
			       TYPE_ATTRIBUTES (t));
	    }
	}

      /* Let's consider the explicit specialization of a member
	 of a class template specialization that is implicitly instantiated,
	 e.g.:
	     template<class T>
	     struct S
	     {
	       template<class U> struct M {}; //#0
	     };

	     template<>
	     template<>
	     struct S<int>::M<char> //#1
	     {
	       int i;
	     };
	 [temp.expl.spec]/4 says this is valid.

	 In this case, when we write:
	 S<int>::M<char> m;

	 M is instantiated from the CLASSTYPE_TI_TEMPLATE of #1, not from
	 the one of #0.

	 When we encounter #1, we want to store the partial instantiation
	 of M (template<class T> S<int>::M<T>) in its CLASSTYPE_TI_TEMPLATE.

	 For all cases other than this "explicit specialization of member of a
	 class template", we just want to store the most general template into
	 the CLASSTYPE_TI_TEMPLATE of M.

	 This case of "explicit specialization of member of a class
	 template" only happens when:
	 1/ the enclosing class is an instantiation of, and therefore not
	 the same as, the context of the most general template, and
	 2/ we aren't looking at the partial instantiation itself, i.e.
	 the innermost arguments are not the same as the innermost parms of
	 the most general template.

	 So it's only when 1/ and 2/ happens that we want to use the partial
	 instantiation of the member template in lieu of its most general
	 template.  */

      if (PRIMARY_TEMPLATE_P (gen_tmpl)
	  && TMPL_ARGS_HAVE_MULTIPLE_LEVELS (arglist)
	  /* the enclosing class must be an instantiation...  */
	  && CLASS_TYPE_P (context)
	  && !same_type_p (context, DECL_CONTEXT (gen_tmpl)))
	{
	  /* Temporarily strip the innermost argument level while
	     substituting into the enclosing levels.  */
	  TREE_VEC_LENGTH (arglist)--;
	  ++processing_template_decl;
	  tree tinfo = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (TREE_TYPE (gen_tmpl));
	  tree partial_inst_args =
	    tsubst (INNERMOST_TEMPLATE_ARGS (TI_ARGS (tinfo)),
		    arglist, complain, NULL_TREE);
	  --processing_template_decl;
	  TREE_VEC_LENGTH (arglist)++;
	  if (partial_inst_args == error_mark_node)
	    return error_mark_node;
	  use_partial_inst_tmpl =
	    /*...and we must not be looking at the partial instantiation
	     itself.  */
	    !comp_template_args (INNERMOST_TEMPLATE_ARGS (arglist),
				 partial_inst_args);
	}

      if (!use_partial_inst_tmpl)
	/* This case is easy; there are no member templates involved.  */
	found = gen_tmpl;
      else
	{
	  /* This is a full instantiation of a member template.  Find
	     the partial instantiation of which this is an instance.  */

	  /* Temporarily reduce by one the number of levels in the ARGLIST
	     so as to avoid comparing the last set of arguments.  */
	  TREE_VEC_LENGTH (arglist)--;
	  found = tsubst (gen_tmpl, arglist, complain, NULL_TREE);
	  TREE_VEC_LENGTH (arglist)++;
	  /* FOUND is either a proper class type, or an alias
	     template specialization.  In the later case, it's a
	     TYPE_DECL, resulting from the substituting of arguments
	     for parameters in the TYPE_DECL of the alias template
	     done earlier.  So be careful while getting the template
	     of FOUND.  */
	  found = (TREE_CODE (found) == TEMPLATE_DECL
		   ? found
		   : (TREE_CODE (found) == TYPE_DECL
		      ? DECL_TI_TEMPLATE (found)
		      : CLASSTYPE_TI_TEMPLATE (found)));

	  if (DECL_CLASS_TEMPLATE_P (found)
	      && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (found)))
	    {
	      /* If this partial instantiation is specialized, we want to
		 use it for hash table lookup.  */
	      elt.tmpl = found;
	      elt.args = arglist = INNERMOST_TEMPLATE_ARGS (arglist);
	      hash = spec_hasher::hash (&elt);
	    }
	}

      /* Build template info for the new specialization.  */
      if (TYPE_ALIAS_P (t))
	{
	  /* This is constructed during instantiation of the alias
	     decl.  But for member templates of template classes, that
	     is not correct as we need to refer to the partially
	     instantiated template, not the most general template.
	     The incorrect knowledge will not have escaped this
	     instantiation process, so we're good just updating the
	     template_info we made then.  */
	  tree ti = DECL_TEMPLATE_INFO (TYPE_NAME (t));
	  gcc_checking_assert (template_args_equal (TI_ARGS (ti), arglist));
	  if (TI_TEMPLATE (ti) != found)
	    {
	      gcc_checking_assert (DECL_TI_TEMPLATE (found) == TI_TEMPLATE (ti));
	      TI_TEMPLATE (ti) = found;
	    }
	}
      else
	SET_TYPE_TEMPLATE_INFO (t, build_template_info (found, arglist));

      /* Enter the new specialization into the hash table.  */
      elt.spec = t;
      slot = type_specializations->find_slot_with_hash (&elt, hash, INSERT);
      gcc_checking_assert (*slot == NULL);
      entry = ggc_alloc<spec_entry> ();
      *entry = elt;
      *slot = entry;

      /* Note this use of the partial instantiation so we can check it
	 later in maybe_process_partial_specialization.  */
      DECL_TEMPLATE_INSTANTIATIONS (found)
	= tree_cons (arglist, t,
		     DECL_TEMPLATE_INSTANTIATIONS (found));

      if (TREE_CODE (template_type) == ENUMERAL_TYPE
	  && !is_dependent_type
	  && !DECL_ALIAS_TEMPLATE_P (gen_tmpl))
	/* Now that the type has been registered on the instantiations
	   list, we set up the enumerators.  Because the enumeration
	   constants may involve the enumeration type itself, we make
	   sure to register the type first, and then create the
	   constants.  That way, doing tsubst_expr for the enumeration
	   constants won't result in recursive calls here; we'll find
	   the instantiation and exit above.  */
	tsubst_enum (template_type, t, arglist);

      if (CLASS_TYPE_P (template_type) && is_dependent_type)
	/* If the type makes use of template parameters, the
	   code that generates debugging information will crash.  */
	DECL_IGNORED_P (TYPE_MAIN_DECL (t)) = 1;

      /* Possibly limit visibility based on template args.  */
      TREE_PUBLIC (type_decl) = 1;
      determine_visibility (type_decl);

      inherit_targ_abi_tags (t);

      return t;
    }
}

/* Wrapper for lookup_template_class_1.  */

tree
lookup_template_class (tree d1, tree arglist, tree in_decl, tree context,
		       int entering_scope, tsubst_flags_t complain)
{
  tree ret;
  timevar_push (TV_TEMPLATE_INST);
  ret = lookup_template_class_1 (d1, arglist, in_decl, context,
				 entering_scope, complain);
  timevar_pop (TV_TEMPLATE_INST);
  return ret;
}

/* Return a TEMPLATE_ID_EXPR for the given variable template and ARGLIST.  */

tree
lookup_template_variable (tree templ, tree arglist)
{
  if (flag_concepts && variable_concept_p (templ))
    return build_concept_check (templ, arglist, tf_none);

  /* The type of the expression is NULL_TREE since the template-id could refer
     to an explicit or partial specialization.  */
  return build2 (TEMPLATE_ID_EXPR, NULL_TREE, templ, arglist);
}

/* Instantiate a variable declaration from a TEMPLATE_ID_EXPR for use.
*/ tree finish_template_variable (tree var, tsubst_flags_t complain) { tree templ = TREE_OPERAND (var, 0); tree arglist = TREE_OPERAND (var, 1); tree tmpl_args = DECL_TI_ARGS (DECL_TEMPLATE_RESULT (templ)); arglist = add_outermost_template_args (tmpl_args, arglist); templ = most_general_template (templ); tree parms = DECL_TEMPLATE_PARMS (templ); arglist = coerce_innermost_template_parms (parms, arglist, templ, complain, /*req_all*/true, /*use_default*/true); if (arglist == error_mark_node) return error_mark_node; if (flag_concepts && !constraints_satisfied_p (templ, arglist)) { if (complain & tf_error) { auto_diagnostic_group d; error ("use of invalid variable template %qE", var); diagnose_constraints (location_of (var), templ, arglist); } return error_mark_node; } return instantiate_template (templ, arglist, complain); } /* Construct a TEMPLATE_ID_EXPR for the given variable template TEMPL having TARGS template args, and instantiate it if it's not dependent. */ tree lookup_and_finish_template_variable (tree templ, tree targs, tsubst_flags_t complain) { templ = lookup_template_variable (templ, targs); if (!any_dependent_template_arguments_p (targs)) { templ = finish_template_variable (templ, complain); mark_used (templ); } return convert_from_reference (templ); } /* If the set of template parameters PARMS contains a template parameter at the given LEVEL and INDEX, then return this parameter. Otherwise return NULL_TREE. */ static tree corresponding_template_parameter (tree parms, int level, int index) { while (TMPL_PARMS_DEPTH (parms) > level) parms = TREE_CHAIN (parms); if (TMPL_PARMS_DEPTH (parms) != level || TREE_VEC_LENGTH (TREE_VALUE (parms)) <= index) return NULL_TREE; tree t = TREE_VALUE (TREE_VEC_ELT (TREE_VALUE (parms), index)); /* As in template_parm_to_arg. 
*/ if (TREE_CODE (t) == TYPE_DECL || TREE_CODE (t) == TEMPLATE_DECL) t = TREE_TYPE (t); else t = DECL_INITIAL (t); gcc_assert (TEMPLATE_PARM_P (t)); return t; } /* Return the template parameter from PARMS that positionally corresponds to the template parameter PARM, or else return NULL_TREE. */ static tree corresponding_template_parameter (tree parms, tree parm) { int level, index; template_parm_level_and_index (parm, &level, &index); return corresponding_template_parameter (parms, level, index); } struct pair_fn_data { tree_fn_t fn; tree_fn_t any_fn; void *data; /* True when we should also visit template parameters that occur in non-deduced contexts. */ bool include_nondeduced_p; hash_set<tree> *visited; }; /* Called from for_each_template_parm via walk_tree. */ static tree for_each_template_parm_r (tree *tp, int *walk_subtrees, void *d) { tree t = *tp; struct pair_fn_data *pfd = (struct pair_fn_data *) d; tree_fn_t fn = pfd->fn; void *data = pfd->data; tree result = NULL_TREE; #define WALK_SUBTREE(NODE) \ do \ { \ result = for_each_template_parm (NODE, fn, data, pfd->visited, \ pfd->include_nondeduced_p, \ pfd->any_fn); \ if (result) goto out; \ } \ while (0) if (pfd->any_fn && (*pfd->any_fn)(t, data)) return t; if (TYPE_P (t) && (pfd->include_nondeduced_p || TREE_CODE (t) != TYPENAME_TYPE)) WALK_SUBTREE (TYPE_CONTEXT (t)); switch (TREE_CODE (t)) { case RECORD_TYPE: if (TYPE_PTRMEMFUNC_P (t)) break; /* Fall through. */ case UNION_TYPE: case ENUMERAL_TYPE: if (!TYPE_TEMPLATE_INFO (t)) *walk_subtrees = 0; else WALK_SUBTREE (TYPE_TI_ARGS (t)); break; case INTEGER_TYPE: WALK_SUBTREE (TYPE_MIN_VALUE (t)); WALK_SUBTREE (TYPE_MAX_VALUE (t)); break; case METHOD_TYPE: /* Since we're not going to walk subtrees, we have to do this explicitly here. */ WALK_SUBTREE (TYPE_METHOD_BASETYPE (t)); /* Fall through. */ case FUNCTION_TYPE: /* Check the return type. */ WALK_SUBTREE (TREE_TYPE (t)); /* Check the parameter types. 
	 Since default arguments are not instantiated until they
	 are needed, the TYPE_ARG_TYPES may contain expressions that
	 involve template parameters.  But, no-one should be looking
	 at them yet.  And, once they're instantiated, they don't
	 contain template parameters, so there's no point in looking
	 at them then, either.  */
      {
	tree parm;

	for (parm = TYPE_ARG_TYPES (t); parm; parm = TREE_CHAIN (parm))
	  WALK_SUBTREE (TREE_VALUE (parm));

	/* Since we've already handled the TYPE_ARG_TYPES, we don't
	   want walk_tree walking into them itself.  */
	*walk_subtrees = 0;
      }

      if (flag_noexcept_type)
	{
	  /* A noexcept-specifier is part of the function type and may
	     itself mention template parameters.  */
	  tree spec = TYPE_RAISES_EXCEPTIONS (t);
	  if (spec)
	    WALK_SUBTREE (TREE_PURPOSE (spec));
	}
      break;

    case TYPEOF_TYPE:
    case DECLTYPE_TYPE:
    case UNDERLYING_TYPE:
      if (pfd->include_nondeduced_p
	  && for_each_template_parm (TYPE_VALUES_RAW (t), fn, data,
				     pfd->visited,
				     pfd->include_nondeduced_p,
				     pfd->any_fn))
	return error_mark_node;
      *walk_subtrees = false;
      break;

    case FUNCTION_DECL:
    case VAR_DECL:
      if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t))
	WALK_SUBTREE (DECL_TI_ARGS (t));
      /* Fall through.  */

    case PARM_DECL:
    case CONST_DECL:
      if (TREE_CODE (t) == CONST_DECL && DECL_TEMPLATE_PARM_P (t))
	WALK_SUBTREE (DECL_INITIAL (t));
      if (DECL_CONTEXT (t)
	  && pfd->include_nondeduced_p)
	WALK_SUBTREE (DECL_CONTEXT (t));
      break;

    case BOUND_TEMPLATE_TEMPLATE_PARM:
      /* Record template parameters such as `T' inside `TT<T>'.  */
      WALK_SUBTREE (TYPE_TI_ARGS (t));
      /* Fall through.  */

    case TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_PARM_INDEX:
      /* An actual template parameter: report it via FN, or stop the
	 walk immediately if FN is NULL (i.e. "always match").  */
      if (fn && (*fn)(t, data))
	return t;
      else if (!fn)
	return t;
      break;

    case TEMPLATE_DECL:
      /* A template template parameter is encountered.  */
      if (DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	WALK_SUBTREE (TREE_TYPE (t));

      /* Already substituted template template parameter */
      *walk_subtrees = 0;
      break;

    case TYPENAME_TYPE:
      /* A template-id in a TYPENAME_TYPE might be a deduced context after
	 partial instantiation.  */
      WALK_SUBTREE (TYPENAME_TYPE_FULLNAME (t));
      *walk_subtrees = 0;
      break;

    case CONSTRUCTOR:
      if (TREE_TYPE (t) && TYPE_PTRMEMFUNC_P (TREE_TYPE (t))
	  && pfd->include_nondeduced_p)
	WALK_SUBTREE (TYPE_PTRMEMFUNC_FN_TYPE (TREE_TYPE (t)));
      break;

    case INDIRECT_REF:
    case COMPONENT_REF:
      /* If there's no type, then this thing must be some expression
	 involving template parameters.  */
      if (!fn && !TREE_TYPE (t))
	return error_mark_node;
      break;

    case MODOP_EXPR:
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case ARROW_EXPR:
    case DOTSTAR_EXPR:
    case TYPEID_EXPR:
    case PSEUDO_DTOR_EXPR:
      if (!fn)
	return error_mark_node;
      break;

    case SCOPE_REF:
      if (pfd->include_nondeduced_p)
	WALK_SUBTREE (TREE_OPERAND (t, 0));
      break;

    case REQUIRES_EXPR:
      {
	if (!fn)
	  return error_mark_node;

	/* Recursively walk the type of each constraint variable.  */
	tree p = TREE_OPERAND (t, 0);
	while (p)
	  {
	    WALK_SUBTREE (TREE_TYPE (p));
	    p = TREE_CHAIN (p);
	  }
      }
      break;

    default:
      break;
    }

#undef WALK_SUBTREE

  /* We didn't find any template parameters we liked.  */
 out:
  return result;
}

/* For each TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM,
   BOUND_TEMPLATE_TEMPLATE_PARM or TEMPLATE_PARM_INDEX in T,
   call FN with the parameter and the DATA.
   If FN returns nonzero, the iteration is terminated, and
   for_each_template_parm returns 1.  Otherwise, the iteration
   continues.  If FN never returns a nonzero value, the value
   returned by for_each_template_parm is 0.  If FN is NULL, it is
   considered to be the function which always returns 1.

   If INCLUDE_NONDEDUCED_P, then this routine will also visit template
   parameters that occur in non-deduced contexts.  When false, only
   visits those template parameters that can be deduced.  */

static tree
for_each_template_parm (tree t, tree_fn_t fn, void* data,
			hash_set<tree> *visited,
			bool include_nondeduced_p,
			tree_fn_t any_fn)
{
  struct pair_fn_data pfd;
  tree result;

  /* Set up.
*/ pfd.fn = fn; pfd.any_fn = any_fn; pfd.data = data; pfd.include_nondeduced_p = include_nondeduced_p; /* Walk the tree. (Conceptually, we would like to walk without duplicates, but for_each_template_parm_r recursively calls for_each_template_parm, so we would need to reorganize a fair bit to use walk_tree_without_duplicates, so we keep our own visited list.) */ if (visited) pfd.visited = visited; else pfd.visited = new hash_set<tree>; result = cp_walk_tree (&t, for_each_template_parm_r, &pfd, pfd.visited); /* Clean up. */ if (!visited) { delete pfd.visited; pfd.visited = 0; } return result; } struct find_template_parameter_info { explicit find_template_parameter_info (tree ctx_parms) : parm_list (NULL_TREE), ctx_parms (ctx_parms), max_depth (TMPL_PARMS_DEPTH (ctx_parms)) {} hash_set<tree> visited; hash_set<tree> parms; tree parm_list; tree ctx_parms; int max_depth; }; /* Appends the declaration of T to the list in DATA. */ static int keep_template_parm (tree t, void* data) { find_template_parameter_info *ftpi = (find_template_parameter_info*)data; /* Template parameters declared within the expression are not part of the parameter mapping. For example, in this concept: template<typename T> concept C = requires { <expr> } -> same_as<int>; the return specifier same_as<int> declares a new decltype parameter that must not be part of the parameter mapping. The same is true for generic lambda parameters, lambda template parameters, etc. */ int level; int index; template_parm_level_and_index (t, &level, &index); if (level > ftpi->max_depth) return 0; if (TREE_CODE (t) == BOUND_TEMPLATE_TEMPLATE_PARM) /* We want the underlying TEMPLATE_TEMPLATE_PARM, not the BOUND_TEMPLATE_TEMPLATE_PARM itself. */ t = TREE_TYPE (TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (t)); /* This template parameter might be an argument to a cached dependent specalization that was formed earlier inside some other template, in which case the parameter is not among the ones that are in-scope. 
Look in CTX_PARMS to find the corresponding in-scope template parameter, and use it instead. */ if (tree in_scope = corresponding_template_parameter (ftpi->ctx_parms, t)) t = in_scope; /* Arguments like const T yield parameters like const T. This means that a template-id like X<T, const T> would yield two distinct parameters: T and const T. Adjust types to their unqualified versions. */ if (TYPE_P (t)) t = TYPE_MAIN_VARIANT (t); if (!ftpi->parms.add (t)) ftpi->parm_list = tree_cons (NULL_TREE, t, ftpi->parm_list); return 0; } /* Ensure that we recursively examine certain terms that are not normally visited in for_each_template_parm_r. */ static int any_template_parm_r (tree t, void *data) { find_template_parameter_info *ftpi = (find_template_parameter_info*)data; #define WALK_SUBTREE(NODE) \ do \ { \ for_each_template_parm (NODE, keep_template_parm, data, \ &ftpi->visited, true, \ any_template_parm_r); \ } \ while (0) /* A mention of a member alias/typedef is a use of all of its template arguments, including those from the enclosing class, so we don't use alias_template_specialization_p here. */ if (TYPE_P (t) && typedef_variant_p (t)) if (tree tinfo = TYPE_ALIAS_TEMPLATE_INFO (t)) WALK_SUBTREE (TI_ARGS (tinfo)); switch (TREE_CODE (t)) { case TEMPLATE_TYPE_PARM: /* Type constraints of a placeholder type may contain parameters. */ if (is_auto (t)) if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (t)) WALK_SUBTREE (constr); break; case TEMPLATE_ID_EXPR: /* Search through references to variable templates. */ WALK_SUBTREE (TREE_OPERAND (t, 0)); WALK_SUBTREE (TREE_OPERAND (t, 1)); break; case TEMPLATE_PARM_INDEX: case PARM_DECL: /* A parameter or constraint variable may also depend on a template parameter without explicitly naming it. */ WALK_SUBTREE (TREE_TYPE (t)); break; case TEMPLATE_DECL: { /* If T is a member template that shares template parameters with ctx_parms, we need to mark all those parameters for mapping. 
*/ tree dparms = DECL_TEMPLATE_PARMS (t); tree cparms = ftpi->ctx_parms; while (TMPL_PARMS_DEPTH (dparms) > ftpi->max_depth) dparms = TREE_CHAIN (dparms); while (TMPL_PARMS_DEPTH (cparms) > TMPL_PARMS_DEPTH (dparms)) cparms = TREE_CHAIN (cparms); while (dparms && (TREE_TYPE (TREE_VALUE (dparms)) != TREE_TYPE (TREE_VALUE (cparms)))) dparms = TREE_CHAIN (dparms), cparms = TREE_CHAIN (cparms); if (dparms) { int ddepth = TMPL_PARMS_DEPTH (dparms); tree dargs = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (t))); for (int i = 0; i < ddepth; ++i) WALK_SUBTREE (TMPL_ARGS_LEVEL (dargs, i+1)); } } break; case LAMBDA_EXPR: { /* Look in the parms and body. */ tree fn = lambda_function (t); WALK_SUBTREE (TREE_TYPE (fn)); WALK_SUBTREE (DECL_SAVED_TREE (fn)); } break; case IDENTIFIER_NODE: if (IDENTIFIER_CONV_OP_P (t)) /* The conversion-type-id of a conversion operator may be dependent. */ WALK_SUBTREE (TREE_TYPE (t)); break; default: break; } /* Keep walking. */ return 0; } /* Returns a list of unique template parameters found within T, where CTX_PARMS are the template parameters in scope. */ tree find_template_parameters (tree t, tree ctx_parms) { if (!ctx_parms) return NULL_TREE; find_template_parameter_info ftpi (ctx_parms); for_each_template_parm (t, keep_template_parm, &ftpi, &ftpi.visited, /*include_nondeduced*/true, any_template_parm_r); return ftpi.parm_list; } /* Returns true if T depends on any template parameter. 
*/ int uses_template_parms (tree t) { if (t == NULL_TREE) return false; bool dependent_p; int saved_processing_template_decl; saved_processing_template_decl = processing_template_decl; if (!saved_processing_template_decl) processing_template_decl = 1; if (TYPE_P (t)) dependent_p = dependent_type_p (t); else if (TREE_CODE (t) == TREE_VEC) dependent_p = any_dependent_template_arguments_p (t); else if (TREE_CODE (t) == TREE_LIST) dependent_p = (uses_template_parms (TREE_VALUE (t)) || uses_template_parms (TREE_CHAIN (t))); else if (TREE_CODE (t) == TYPE_DECL) dependent_p = dependent_type_p (TREE_TYPE (t)); else if (t == error_mark_node) dependent_p = false; else dependent_p = value_dependent_expression_p (t); processing_template_decl = saved_processing_template_decl; return dependent_p; } /* Returns true iff current_function_decl is an incompletely instantiated template. Useful instead of processing_template_decl because the latter is set to 0 during instantiate_non_dependent_expr. */ bool in_template_function (void) { tree fn = current_function_decl; bool ret; ++processing_template_decl; ret = (fn && DECL_LANG_SPECIFIC (fn) && DECL_TEMPLATE_INFO (fn) && any_dependent_template_arguments_p (DECL_TI_ARGS (fn))); --processing_template_decl; return ret; } /* Returns true if T depends on any template parameter with level LEVEL. */ bool uses_template_parms_level (tree t, int level) { return for_each_template_parm (t, template_parm_this_level_p, &level, NULL, /*include_nondeduced_p=*/true); } /* Returns true if the signature of DECL depends on any template parameter from its enclosing class. 
   */

bool
uses_outer_template_parms (tree decl)
{
  int depth = template_class_depth (CP_DECL_CONTEXT (decl));
  if (depth == 0)
    return false;
  /* Check the declaration's type, its own template parameter list, and
     its associated constraints, in that order.  */
  if (for_each_template_parm (TREE_TYPE (decl), template_parm_outer_level,
			      &depth, NULL, /*include_nondeduced_p=*/true))
    return true;
  if (PRIMARY_TEMPLATE_P (decl)
      && for_each_template_parm (INNERMOST_TEMPLATE_PARMS
				 (DECL_TEMPLATE_PARMS (decl)),
				 template_parm_outer_level,
				 &depth, NULL, /*include_nondeduced_p=*/true))
    return true;
  tree ci = get_constraints (decl);
  if (ci)
    ci = CI_ASSOCIATED_CONSTRAINTS (ci);
  if (ci && for_each_template_parm (ci, template_parm_outer_level, &depth,
				    NULL, /*nondeduced*/true))
    return true;
  return false;
}

/* Returns TRUE iff INST is an instantiation we don't need to do in an
   ill-formed translation unit, i.e. a variable or function that isn't
   usable in a constant expression.  */

static inline bool
neglectable_inst_p (tree d)
{
  return (d && DECL_P (d)
	  && !undeduced_auto_decl (d)
	  && !(TREE_CODE (d) == FUNCTION_DECL
	       ? DECL_DECLARED_CONSTEXPR_P (d)
	       : decl_maybe_constant_var_p (d)));
}

/* Returns TRUE iff we should refuse to instantiate DECL because it's
   neglectable and instantiated from within an erroneous instantiation.  */

static bool
limit_bad_template_recursion (tree decl)
{
  struct tinst_level *lev = current_tinst_level;
  int errs = errorcount + sorrycount;
  if (lev == NULL || errs == 0 || !neglectable_inst_p (decl))
    return false;

  /* Find the nearest enclosing instantiation that is itself
     neglectable; refuse if errors have occurred since it started.  */
  for (; lev; lev = lev->next)
    if (neglectable_inst_p (lev->maybe_get_node ()))
      break;

  return (lev && errs > lev->errors);
}

/* Current depth of the template instantiation stack.  */
static int tinst_depth;
extern int max_tinst_depth;
/* Deepest instantiation nesting reached so far (statistics).  */
int depth_reached;

static GTY(()) struct tinst_level *last_error_tinst_level;

/* We're starting to instantiate D; record the template instantiation context
   at LOC for diagnostics and to restore it later.  */

bool
push_tinst_level_loc (tree tldcl, tree targs, location_t loc)
{
  struct tinst_level *new_level;

  if (tinst_depth >= max_tinst_depth)
    {
      /* Tell error.c not to try to instantiate any templates.  */
      at_eof = 2;
      fatal_error (input_location,
		   "template instantiation depth exceeds maximum of %d"
		   " (use %<-ftemplate-depth=%> to increase the maximum)",
		   max_tinst_depth);
      return false;
    }

  /* If the current instantiation caused problems, don't let it instantiate
     anything else.  Do allow deduction substitution and decls usable in
     constant expressions.  */
  if (!targs && limit_bad_template_recursion (tldcl))
    {
      /* Avoid no_linkage_errors and unused function warnings for this
	 decl.  */
      TREE_NO_WARNING (tldcl) = 1;
      return false;
    }

  /* When not -quiet, dump template instantiations other than functions, since
     announce_function will take care of those.  */
  if (!quiet_flag && !targs
      && TREE_CODE (tldcl) != TREE_LIST
      && TREE_CODE (tldcl) != FUNCTION_DECL)
    fprintf (stderr, " %s", decl_as_string (tldcl, TFF_DECL_SPECIFIERS));

  /* Push a new refcounted level onto the instantiation stack.  */
  new_level = tinst_level_freelist ().alloc ();
  new_level->tldcl = tldcl;
  new_level->targs = targs;
  new_level->locus = loc;
  new_level->errors = errorcount + sorrycount;
  new_level->next = NULL;
  new_level->refcount = 0;
  set_refcount_ptr (new_level->next, current_tinst_level);
  set_refcount_ptr (current_tinst_level, new_level);

  ++tinst_depth;
  if (GATHER_STATISTICS && (tinst_depth > depth_reached))
    depth_reached = tinst_depth;

  return true;
}

/* We're starting substitution of TMPL<ARGS>; record the template
   substitution context for diagnostics and to restore it later.  */

bool
push_tinst_level (tree tmpl, tree args)
{
  return push_tinst_level_loc (tmpl, args, input_location);
}

/* We're starting to instantiate D; record INPUT_LOCATION and the
   template instantiation context for diagnostics and to restore it
   later.  */

bool
push_tinst_level (tree d)
{
  return push_tinst_level_loc (d, input_location);
}

/* Likewise, but record LOC as the program location.  */

bool
push_tinst_level_loc (tree d, location_t loc)
{
  gcc_assert (TREE_CODE (d) != TREE_LIST);
  return push_tinst_level_loc (d, NULL, loc);
}

/* We're done instantiating this template; return to the instantiation
   context.
*/ void pop_tinst_level (void) { /* Restore the filename and line number stashed away when we started this instantiation. */ input_location = current_tinst_level->locus; set_refcount_ptr (current_tinst_level, current_tinst_level->next); --tinst_depth; } /* We're instantiating a deferred template; restore the template instantiation context in which the instantiation was requested, which is one step out from LEVEL. Return the corresponding DECL or TYPE. */ static tree reopen_tinst_level (struct tinst_level *level) { struct tinst_level *t; tinst_depth = 0; for (t = level; t; t = t->next) ++tinst_depth; set_refcount_ptr (current_tinst_level, level); pop_tinst_level (); if (current_tinst_level) current_tinst_level->errors = errorcount+sorrycount; return level->maybe_get_node (); } /* Returns the TINST_LEVEL which gives the original instantiation context. */ struct tinst_level * outermost_tinst_level (void) { struct tinst_level *level = current_tinst_level; if (level) while (level->next) level = level->next; return level; } /* DECL is a friend FUNCTION_DECL or TEMPLATE_DECL. ARGS is the vector of template arguments, as for tsubst. Returns an appropriate tsubst'd friend declaration. */ static tree tsubst_friend_function (tree decl, tree args) { tree new_friend; if (TREE_CODE (decl) == FUNCTION_DECL && DECL_TEMPLATE_INSTANTIATION (decl) && TREE_CODE (DECL_TI_TEMPLATE (decl)) != TEMPLATE_DECL) /* This was a friend declared with an explicit template argument list, e.g.: friend void f<>(T); to indicate that f was a template instantiation, not a new function declaration. Now, we have to figure out what instantiation of what template. */ { tree template_id, arglist, fns; tree new_args; tree tmpl; tree ns = decl_namespace_context (TYPE_MAIN_DECL (current_class_type)); /* Friend functions are looked up in the containing namespace scope. We must enter that scope, to avoid finding member functions of the current class with same name. 
	 */
      push_nested_namespace (ns);
      fns = tsubst_expr (DECL_TI_TEMPLATE (decl), args,
			 tf_warning_or_error, NULL_TREE,
			 /*integral_constant_expression_p=*/false);
      pop_nested_namespace (ns);
      arglist = tsubst (DECL_TI_ARGS (decl), args, tf_warning_or_error,
			NULL_TREE);
      template_id = lookup_template_function (fns, arglist);

      new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE);
      tmpl = determine_specialization (template_id, new_friend,
				       &new_args,
				       /*need_member_template=*/0,
				       TREE_VEC_LENGTH (args),
				       tsk_none);
      return instantiate_template (tmpl, new_args, tf_error);
    }

  new_friend = tsubst (decl, args, tf_warning_or_error, NULL_TREE);
  if (new_friend == error_mark_node)
    return error_mark_node;

  /* The NEW_FRIEND will look like an instantiation, to the
     compiler, but is not an instantiation from the point of view of
     the language.  For example, we might have had:

     template <class T> struct S {
       template <class U> friend void f(T, U);
     };

     Then, in S<int>, template <class U> void f(int, U) is not an
     instantiation of anything.  */

  DECL_USE_TEMPLATE (new_friend) = 0;
  if (TREE_CODE (new_friend) == TEMPLATE_DECL)
    {
      DECL_USE_TEMPLATE (DECL_TEMPLATE_RESULT (new_friend)) = 0;
      DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (new_friend))
	= DECL_SAVED_TREE (DECL_TEMPLATE_RESULT (decl));

      /* Substitute TEMPLATE_PARMS_CONSTRAINTS so that parameter levels will
	 match in decls_match.  */
      tree parms = DECL_TEMPLATE_PARMS (new_friend);
      tree treqs = TEMPLATE_PARMS_CONSTRAINTS (parms);
      treqs = maybe_substitute_reqs_for (treqs, new_friend);
      TEMPLATE_PARMS_CONSTRAINTS (parms) = treqs;
    }

  /* The mangled name for the NEW_FRIEND is incorrect.  The function
     is not a template instantiation and should not be mangled like
     one.  Therefore, we forget the mangling here; we'll recompute it
     later if we need it.  */
  if (TREE_CODE (new_friend) != TEMPLATE_DECL)
    {
      SET_DECL_RTL (new_friend, NULL);
      SET_DECL_ASSEMBLER_NAME (new_friend, NULL_TREE);
    }

  if (DECL_NAMESPACE_SCOPE_P (new_friend))
    {
      tree old_decl;
      tree ns;

      /* We must save some information from NEW_FRIEND before calling
	 duplicate decls since that function will free NEW_FRIEND if
	 possible.  */
      tree new_friend_template_info = DECL_TEMPLATE_INFO (new_friend);
      tree new_friend_result_template_info = NULL_TREE;
      bool new_friend_is_defn =
	(DECL_INITIAL (DECL_TEMPLATE_RESULT
		       (template_for_substitution (new_friend)))
	 != NULL_TREE);
      tree not_tmpl = new_friend;

      if (TREE_CODE (new_friend) == TEMPLATE_DECL)
	{
	  /* This declaration is a `primary' template.  */
	  DECL_PRIMARY_TEMPLATE (new_friend) = new_friend;

	  not_tmpl = DECL_TEMPLATE_RESULT (new_friend);
	  new_friend_result_template_info = DECL_TEMPLATE_INFO (not_tmpl);
	}

      /* Inside pushdecl_namespace_level, we will push into the
	 current namespace. However, the friend function should go
	 into the namespace of the template.  */
      ns = decl_namespace_context (new_friend);
      push_nested_namespace (ns);
      old_decl = pushdecl_namespace_level (new_friend, /*hiding=*/true);
      pop_nested_namespace (ns);

      if (old_decl == error_mark_node)
	return error_mark_node;

      if (old_decl != new_friend)
	{
	  /* This new friend declaration matched an existing
	     declaration.  For example, given:

	       template <class T> void f(T);
	       template <class U> class C {
		 template <class T> friend void f(T) {}
	       };

	     the friend declaration actually provides the definition
	     of `f', once C has been instantiated for some type.  So,
	     old_decl will be the out-of-class template declaration,
	     while new_friend is the in-class definition.

	     But, if `f' was called before this point, the
	     instantiation of `f' will have DECL_TI_ARGS corresponding
	     to `T' but not to `U', references to which might appear
	     in the definition of `f'.  Previously, the most general
	     template for an instantiation of `f' was the out-of-class
	     version; now it is the in-class version.  Therefore, we
	     run through all specialization of `f', adding to their
	     DECL_TI_ARGS appropriately.  In particular, they need a
	     new set of outer arguments, corresponding to the
	     arguments for this class instantiation.

	     The same situation can arise with something like this:

	       friend void f(int);
	       template <class T> class C {
		 friend void f(T) {}
	       };

	     when `C<int>' is instantiated.  Now, `f(int)' is defined
	     in the class.  */

	  if (!new_friend_is_defn)
	    /* On the other hand, if the in-class declaration does
	       *not* provide a definition, then we don't want to alter
	       existing definitions.  We can just leave everything
	       alone.  */
	    ;
	  else
	    {
	      tree new_template = TI_TEMPLATE (new_friend_template_info);
	      tree new_args = TI_ARGS (new_friend_template_info);

	      /* Overwrite whatever template info was there before, if
		 any, with the new template information pertaining to
		 the declaration.  */
	      DECL_TEMPLATE_INFO (old_decl) = new_friend_template_info;

	      if (TREE_CODE (old_decl) != TEMPLATE_DECL)
		{
		  /* We should have called reregister_specialization in
		     duplicate_decls.  */
		  gcc_assert (retrieve_specialization (new_template,
						       new_args, 0)
			      == old_decl);

		  /* Instantiate it if the global has already been used.  */
		  if (DECL_ODR_USED (old_decl))
		    instantiate_decl (old_decl, /*defer_ok=*/true,
				      /*expl_inst_class_mem_p=*/false);
		}
	      else
		{
		  tree t;

		  /* Indicate that the old function template is a partial
		     instantiation.  */
		  DECL_TEMPLATE_INFO (DECL_TEMPLATE_RESULT (old_decl))
		    = new_friend_result_template_info;

		  gcc_assert (new_template
			      == most_general_template (new_template));
		  gcc_assert (new_template != old_decl);

		  /* Reassign any specializations already in the hash table
		     to the new more general template, and add the
		     additional template args.  */
		  for (t = DECL_TEMPLATE_INSTANTIATIONS (old_decl);
		       t != NULL_TREE;
		       t = TREE_CHAIN (t))
		    {
		      tree spec = TREE_VALUE (t);
		      spec_entry elt;

		      elt.tmpl = old_decl;
		      elt.args = DECL_TI_ARGS (spec);
		      elt.spec = NULL_TREE;

		      decl_specializations->remove_elt (&elt);

		      DECL_TI_ARGS (spec)
			= add_outermost_template_args (new_args,
						       DECL_TI_ARGS (spec));

		      register_specialization
			(spec, new_template, DECL_TI_ARGS (spec), true, 0);

		    }
		  DECL_TEMPLATE_INSTANTIATIONS (old_decl) = NULL_TREE;
		}
	    }

	  /* The information from NEW_FRIEND has been merged into OLD_DECL
	     by duplicate_decls.  */
	  new_friend = old_decl;
	}
    }
  else
    {
      tree context = DECL_CONTEXT (new_friend);
      bool dependent_p;

      /* In the code
	   template <class T> class C {
	     template <class U> friend void C1<U>::f (); // case 1
	     friend void C2<T>::f ();			 // case 2
	   };
	 we only need to make sure CONTEXT is a complete type for
	 case 2.  To distinguish between the two cases, we note that
	 CONTEXT of case 1 remains dependent type after tsubst while
	 this isn't true for case 2.  */
      ++processing_template_decl;
      dependent_p = dependent_type_p (context);
      --processing_template_decl;

      if (!dependent_p
	  && !complete_type_or_else (context, NULL_TREE))
	return error_mark_node;

      if (COMPLETE_TYPE_P (context))
	{
	  tree fn = new_friend;
	  /* do_friend adds the TEMPLATE_DECL for any member friend
	     template even if it isn't a member template, i.e.
	       template <class T> friend A<T>::f();
	     Look through it in that case.  */
	  if (TREE_CODE (fn) == TEMPLATE_DECL
	      && !PRIMARY_TEMPLATE_P (fn))
	    fn = DECL_TEMPLATE_RESULT (fn);
	  /* Check to see that the declaration is really present, and,
	     possibly obtain an improved declaration.  */
	  fn = check_classfn (context, fn, NULL_TREE);

	  if (fn)
	    new_friend = fn;
	}
    }

  return new_friend;
}

/* FRIEND_TMPL is a friend TEMPLATE_DECL.  ARGS is the vector of
   template arguments, as for tsubst.

   Returns an appropriate tsubst'd friend type or error_mark_node on
   failure.
   */

static tree
tsubst_friend_class (tree friend_tmpl, tree args)
{
  tree tmpl;

  if (DECL_TEMPLATE_TEMPLATE_PARM_P (friend_tmpl))
    {
      /* The friend is itself a template template parameter: just
	 substitute and return the resulting type.  */
      tmpl = tsubst (TREE_TYPE (friend_tmpl), args, tf_none, NULL_TREE);
      return TREE_TYPE (tmpl);
    }

  tree context = CP_DECL_CONTEXT (friend_tmpl);
  if (TREE_CODE (context) == NAMESPACE_DECL)
    push_nested_namespace (context);
  else
    {
      context = tsubst (context, args, tf_error, NULL_TREE);
      push_nested_class (context);
    }

  tmpl = lookup_name (DECL_NAME (friend_tmpl), LOOK_where::CLASS_NAMESPACE,
		      LOOK_want::NORMAL | LOOK_want::HIDDEN_FRIEND);

  if (tmpl && DECL_CLASS_TEMPLATE_P (tmpl))
    {
      /* The friend template has already been declared.  Just
	 check to see that the declarations match, and install any new
	 default parameters.  We must tsubst the default parameters,
	 of course.  We only need the innermost template parameters
	 because that is all that redeclare_class_template will look
	 at.  */
      if (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (friend_tmpl))
	  > TMPL_ARGS_DEPTH (args))
	{
	  tree parms = tsubst_template_parms (DECL_TEMPLATE_PARMS (friend_tmpl),
					      args, tf_warning_or_error);
	  location_t saved_input_location = input_location;
	  input_location = DECL_SOURCE_LOCATION (friend_tmpl);
	  tree cons = get_constraints (tmpl);
	  redeclare_class_template (TREE_TYPE (tmpl), parms, cons);
	  input_location = saved_input_location;
	}
    }
  else
    {
      /* The friend template has not already been declared.  In this
	 case, the instantiation of the template class will cause the
	 injection of this template into the namespace scope.  */
      tmpl = tsubst (friend_tmpl, args, tf_warning_or_error, NULL_TREE);

      if (tmpl != error_mark_node)
	{
	  /* The new TMPL is not an instantiation of anything, so we
	     forget its origins.  We don't reset CLASSTYPE_TI_TEMPLATE
	     for the new type because that is supposed to be the
	     corresponding template decl, i.e., TMPL.  */
	  DECL_USE_TEMPLATE (tmpl) = 0;
	  DECL_TEMPLATE_INFO (tmpl) = NULL_TREE;
	  CLASSTYPE_USE_TEMPLATE (TREE_TYPE (tmpl)) = 0;
	  CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl))
	    = INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (TREE_TYPE (tmpl)));

	  /* Substitute into and set the constraints on the new
	     declaration.  */
	  if (tree ci = get_constraints (friend_tmpl))
	    {
	      ++processing_template_decl;
	      ci = tsubst_constraint_info (ci, args, tf_warning_or_error,
					   DECL_FRIEND_CONTEXT (friend_tmpl));
	      --processing_template_decl;
	      set_constraints (tmpl, ci);
	    }

	  /* Inject this template into the enclosing namspace scope.  */
	  tmpl = pushdecl_namespace_level (tmpl, /*hiding=*/true);
	}
    }

  if (TREE_CODE (context) == NAMESPACE_DECL)
    pop_nested_namespace (context);
  else
    pop_nested_class ();

  return TREE_TYPE (tmpl);
}

/* Returns zero if TYPE cannot be completed later due to circularity.
   Otherwise returns one.  */

static int
can_complete_type_without_circularity (tree type)
{
  if (type == NULL_TREE || type == error_mark_node)
    return 0;
  else if (COMPLETE_TYPE_P (type))
    return 1;
  else if (TREE_CODE (type) == ARRAY_TYPE)
    return can_complete_type_without_circularity (TREE_TYPE (type));
  else if (CLASS_TYPE_P (type)
	   && TYPE_BEING_DEFINED (TYPE_MAIN_VARIANT (type)))
    return 0;
  else
    return 1;
}

static tree tsubst_omp_clauses (tree, enum c_omp_region_type, tree,
				tsubst_flags_t, tree);

/* Instantiate a single dependent attribute T (a TREE_LIST), and return either
   T or a new TREE_LIST, possibly a chain in the case of a pack expansion.  */

static tree
tsubst_attribute (tree t, tree *decl_p, tree args,
		  tsubst_flags_t complain, tree in_decl)
{
  gcc_assert (ATTR_IS_DEPENDENT (t));

  tree val = TREE_VALUE (t);
  if (val == NULL_TREE)
    /* Nothing to do.
*/; else if ((flag_openmp || flag_openmp_simd) && is_attribute_p ("omp declare simd", get_attribute_name (t))) { tree clauses = TREE_VALUE (val); clauses = tsubst_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD, args, complain, in_decl); c_omp_declare_simd_clauses_to_decls (*decl_p, clauses); clauses = finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD); tree parms = DECL_ARGUMENTS (*decl_p); clauses = c_omp_declare_simd_clauses_to_numbers (parms, clauses); if (clauses) val = build_tree_list (NULL_TREE, clauses); else val = NULL_TREE; } else if (flag_openmp && is_attribute_p ("omp declare variant base", get_attribute_name (t))) { ++cp_unevaluated_operand; tree varid = tsubst_expr (TREE_PURPOSE (val), args, complain, in_decl, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; tree chain = TREE_CHAIN (val); location_t match_loc = cp_expr_loc_or_input_loc (TREE_PURPOSE (chain)); tree ctx = copy_list (TREE_VALUE (val)); tree simd = get_identifier ("simd"); tree score = get_identifier (" score"); tree condition = get_identifier ("condition"); for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1)) { const char *set = IDENTIFIER_POINTER (TREE_PURPOSE (t1)); TREE_VALUE (t1) = copy_list (TREE_VALUE (t1)); for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2)) { if (TREE_PURPOSE (t2) == simd && set[0] == 'c') { tree clauses = TREE_VALUE (t2); clauses = tsubst_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD, args, complain, in_decl); c_omp_declare_simd_clauses_to_decls (*decl_p, clauses); clauses = finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD); TREE_VALUE (t2) = clauses; } else { TREE_VALUE (t2) = copy_list (TREE_VALUE (t2)); for (tree t3 = TREE_VALUE (t2); t3; t3 = TREE_CHAIN (t3)) if (TREE_VALUE (t3)) { bool allow_string = ((TREE_PURPOSE (t2) != condition || set[0] != 'u') && TREE_PURPOSE (t3) != score); tree v = TREE_VALUE (t3); if (TREE_CODE (v) == STRING_CST && allow_string) continue; v = tsubst_expr (v, args, complain, in_decl, true); v = 
fold_non_dependent_expr (v); if (!INTEGRAL_TYPE_P (TREE_TYPE (v)) || (TREE_PURPOSE (t3) == score ? TREE_CODE (v) != INTEGER_CST : !tree_fits_shwi_p (v))) { location_t loc = cp_expr_loc_or_loc (TREE_VALUE (t3), match_loc); if (TREE_PURPOSE (t3) == score) error_at (loc, "score argument must be " "constant integer expression"); else if (allow_string) error_at (loc, "property must be constant " "integer expression or string " "literal"); else error_at (loc, "property must be constant " "integer expression"); return NULL_TREE; } else if (TREE_PURPOSE (t3) == score && tree_int_cst_sgn (v) < 0) { location_t loc = cp_expr_loc_or_loc (TREE_VALUE (t3), match_loc); error_at (loc, "score argument must be " "non-negative"); return NULL_TREE; } TREE_VALUE (t3) = v; } } } } val = tree_cons (varid, ctx, chain); } /* If the first attribute argument is an identifier, don't pass it through tsubst. Attributes like mode, format, cleanup and several target specific attributes expect it unmodified. */ else if (attribute_takes_identifier_p (get_attribute_name (t))) { tree chain = tsubst_expr (TREE_CHAIN (val), args, complain, in_decl, /*integral_constant_expression_p=*/false); if (chain != TREE_CHAIN (val)) val = tree_cons (NULL_TREE, TREE_VALUE (val), chain); } else if (PACK_EXPANSION_P (val)) { /* An attribute pack expansion. */ tree purp = TREE_PURPOSE (t); tree pack = tsubst_pack_expansion (val, args, complain, in_decl); if (pack == error_mark_node) return error_mark_node; int len = TREE_VEC_LENGTH (pack); tree list = NULL_TREE; tree *q = &list; for (int i = 0; i < len; ++i) { tree elt = TREE_VEC_ELT (pack, i); *q = build_tree_list (purp, elt); q = &TREE_CHAIN (*q); } return list; } else val = tsubst_expr (val, args, complain, in_decl, /*integral_constant_expression_p=*/false); if (val != TREE_VALUE (t)) return build_tree_list (TREE_PURPOSE (t), val); return t; } /* Instantiate any dependent attributes in ATTRIBUTES, returning either it unchanged or a new TREE_LIST chain. 
*/

static tree
tsubst_attributes (tree attributes, tree args,
		   tsubst_flags_t complain, tree in_decl)
{
  tree last_dep = NULL_TREE;

  /* Copy the list lazily: only if at least one attribute is dependent.  */
  for (tree t = attributes; t; t = TREE_CHAIN (t))
    if (ATTR_IS_DEPENDENT (t))
      {
	last_dep = t;
	attributes = copy_list (attributes);
	break;
      }

  if (last_dep)
    for (tree *p = &attributes; *p; )
      {
	tree t = *p;
	if (ATTR_IS_DEPENDENT (t))
	  {
	    tree subst = tsubst_attribute (t, NULL, args, complain, in_decl);
	    if (subst != t)
	      {
		/* Splice SUBST (possibly a multi-element chain from a
		   pack expansion) in place of T, then reattach T's
		   tail after it.  */
		*p = subst;
		while (*p)
		  p = &TREE_CHAIN (*p);
		*p = TREE_CHAIN (t);
		continue;
	      }
	  }
	p = &TREE_CHAIN (*p);
      }

  return attributes;
}

/* Apply any attributes which had to be deferred until instantiation
   time.  DECL_P, ATTRIBUTES and ATTR_FLAGS are as cplus_decl_attributes;
   ARGS, COMPLAIN, IN_DECL are as tsubst.  */

static void
apply_late_template_attributes (tree *decl_p, tree attributes, int attr_flags,
				tree args, tsubst_flags_t complain,
				tree in_decl)
{
  tree last_dep = NULL_TREE;
  tree t;
  tree *p;

  if (attributes == NULL_TREE)
    return;

  if (DECL_P (*decl_p))
    {
      if (TREE_TYPE (*decl_p) == error_mark_node)
	return;
      p = &DECL_ATTRIBUTES (*decl_p);
      /* DECL_ATTRIBUTES comes from copy_node in tsubst_decl, and is identical
	 to our attributes parameter.  */
      gcc_assert (*p == attributes);
    }
  else
    {
      p = &TYPE_ATTRIBUTES (*decl_p);
      /* TYPE_ATTRIBUTES was set up (with abi_tag and may_alias) in
	 lookup_template_class_1, and should be preserved.  */
      gcc_assert (*p != attributes);
      while (*p)
	p = &TREE_CHAIN (*p);
    }

  /* Copy the list lazily: only if at least one attribute is dependent.  */
  for (t = attributes; t; t = TREE_CHAIN (t))
    if (ATTR_IS_DEPENDENT (t))
      {
	last_dep = t;
	attributes = copy_list (attributes);
	break;
      }

  *p = attributes;
  if (last_dep)
    {
      tree late_attrs = NULL_TREE;
      tree *q = &late_attrs;

      /* Move the dependent attributes out of the decl/type's list into
	 LATE_ATTRS, substituting as we go, then apply them properly via
	 cplus_decl_attributes.  */
      for (; *p; )
	{
	  t = *p;
	  if (ATTR_IS_DEPENDENT (t))
	    {
	      *p = TREE_CHAIN (t);
	      TREE_CHAIN (t) = NULL_TREE;
	      *q = tsubst_attribute (t, decl_p, args, complain, in_decl);
	      while (*q)
		q = &TREE_CHAIN (*q);
	    }
	  else
	    p = &TREE_CHAIN (t);
	}

      cplus_decl_attributes (decl_p, late_attrs, attr_flags);
    }
}

/* The template TMPL is being instantiated with the template arguments TARGS.
   Perform the access checks that we deferred when parsing the template.  */

static void
perform_instantiation_time_access_checks (tree tmpl, tree targs)
{
  unsigned i;
  deferred_access_check *chk;

  if (!CLASS_TYPE_P (tmpl) && TREE_CODE (tmpl) != FUNCTION_DECL)
    return;

  if (vec<deferred_access_check, va_gc> *access_checks
      = TI_DEFERRED_ACCESS_CHECKS (get_template_info (tmpl)))
    FOR_EACH_VEC_ELT (*access_checks, i, chk)
      {
	tree decl = chk->decl;
	tree diag_decl = chk->diag_decl;
	tree type_scope = TREE_TYPE (chk->binfo);

	if (uses_template_parms (type_scope))
	  type_scope = tsubst (type_scope, targs, tf_error, NULL_TREE);

	/* Make access check error messages point to the location
	   of the use of the typedef.  */
	iloc_sentinel ils (chk->loc);
	perform_or_defer_access_check (TYPE_BINFO (type_scope),
				       decl, diag_decl, tf_warning_or_error);
      }
}

/* Worker for instantiate_class_template: instantiate the class template
   specialization TYPE, filling in its bases and members.  */

static tree
instantiate_class_template_1 (tree type)
{
  tree templ, args, pattern, t, member;
  tree typedecl;
  tree pbinfo;
  tree base_list;
  unsigned int saved_maximum_field_alignment;
  tree fn_context;

  if (type == error_mark_node)
    return error_mark_node;

  if (COMPLETE_OR_OPEN_TYPE_P (type)
      || uses_template_parms (type))
    return type;

  /* Figure out which template is being instantiated.  */
  templ = most_general_template (CLASSTYPE_TI_TEMPLATE (type));
  gcc_assert (TREE_CODE (templ) == TEMPLATE_DECL);

  /* Mark the type as in the process of being defined.  */
  TYPE_BEING_DEFINED (type) = 1;

  /* We may be in the middle of deferred access check.  Disable it now.  */
  deferring_access_check_sentinel acs (dk_no_deferred);

  /* Determine what specialization of the original template to
     instantiate.  */
  t = most_specialized_partial_spec (type, tf_warning_or_error);
  if (t == error_mark_node)
    return error_mark_node;
  else if (t)
    {
      /* This TYPE is actually an instantiation of a partial
	 specialization.  We replace the innermost set of ARGS with
	 the arguments appropriate for substitution.  For example,
	 given:

	   template <class T> struct S {};
	   template <class T> struct S<T*> {};

	 and supposing that we are instantiating S<int*>, ARGS will
	 presently be {int*} -- but we need {int}.  */
      pattern = TREE_TYPE (t);
      args = TREE_PURPOSE (t);
    }
  else
    {
      pattern = TREE_TYPE (templ);
      args = CLASSTYPE_TI_ARGS (type);
    }

  /* If the template we're instantiating is incomplete, then clearly
     there's nothing we can do.  */
  if (!COMPLETE_TYPE_P (pattern))
    {
      /* We can try again later.  */
      TYPE_BEING_DEFINED (type) = 0;
      return type;
    }

  /* If we've recursively instantiated too many templates, stop.  */
  if (! push_tinst_level (type))
    return type;

  int saved_unevaluated_operand = cp_unevaluated_operand;
  int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;

  fn_context = decl_function_context (TYPE_MAIN_DECL (type));
  /* Also avoid push_to_top_level for a lambda in an NSDMI.  */
  if (!fn_context && LAMBDA_TYPE_P (type) && TYPE_CLASS_SCOPE_P (type))
    fn_context = error_mark_node;
  if (!fn_context)
    push_to_top_level ();
  else
    {
      cp_unevaluated_operand = 0;
      c_inhibit_evaluation_warnings = 0;
    }

  /* Use #pragma pack from the template context.  */
  saved_maximum_field_alignment = maximum_field_alignment;
  maximum_field_alignment = TYPE_PRECISION (pattern);

  SET_CLASSTYPE_INTERFACE_UNKNOWN (type);

  /* Set the input location to the most specialized template definition.
     This is needed if tsubsting causes an error.  */
  typedecl = TYPE_MAIN_DECL (pattern);
  input_location = DECL_SOURCE_LOCATION (TYPE_NAME (type)) =
    DECL_SOURCE_LOCATION (typedecl);

  /* Copy layout- and semantics-affecting flags from the pattern.  */
  TYPE_PACKED (type) = TYPE_PACKED (pattern);
  SET_TYPE_ALIGN (type, TYPE_ALIGN (pattern));
  TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (pattern);
  CLASSTYPE_NON_AGGREGATE (type) = CLASSTYPE_NON_AGGREGATE (pattern);
  if (ANON_AGGR_TYPE_P (pattern))
    SET_ANON_AGGR_TYPE_P (type);
  if (CLASSTYPE_VISIBILITY_SPECIFIED (pattern))
    {
      CLASSTYPE_VISIBILITY_SPECIFIED (type) = 1;
      CLASSTYPE_VISIBILITY (type) = CLASSTYPE_VISIBILITY (pattern);
      /* Adjust visibility for template arguments.  */
      determine_visibility (TYPE_MAIN_DECL (type));
    }
  if (CLASS_TYPE_P (type))
    CLASSTYPE_FINAL (type) = CLASSTYPE_FINAL (pattern);

  pbinfo = TYPE_BINFO (pattern);

  /* We should never instantiate a nested class before its enclosing
     class; we need to look up the nested class by name before we can
     instantiate it, and that lookup should instantiate the enclosing
     class.  */
  gcc_assert (!DECL_CLASS_SCOPE_P (TYPE_MAIN_DECL (pattern))
	      || COMPLETE_OR_OPEN_TYPE_P (TYPE_CONTEXT (type)));

  base_list = NULL_TREE;
  if (BINFO_N_BASE_BINFOS (pbinfo))
    {
      tree pbase_binfo;
      tree pushed_scope;
      int i;

      /* We must enter the scope containing the type, as that is where
	 the accessibility of types named in dependent bases are
	 looked up from.  */
      pushed_scope = push_scope (CP_TYPE_CONTEXT (type));

      /* Substitute into each of the bases to determine the actual
	 basetypes.  */
      for (i = 0; BINFO_BASE_ITERATE (pbinfo, i, pbase_binfo); i++)
	{
	  tree base;
	  tree access = BINFO_BASE_ACCESS (pbinfo, i);
	  tree expanded_bases = NULL_TREE;
	  int idx, len = 1;

	  if (PACK_EXPANSION_P (BINFO_TYPE (pbase_binfo)))
	    {
	      expanded_bases
		= tsubst_pack_expansion (BINFO_TYPE (pbase_binfo),
					 args, tf_error, NULL_TREE);
	      if (expanded_bases == error_mark_node)
		continue;

	      len = TREE_VEC_LENGTH (expanded_bases);
	    }

	  for (idx = 0; idx < len; idx++)
	    {
	      if (expanded_bases)
		/* Extract the already-expanded base class.  */
		base = TREE_VEC_ELT (expanded_bases, idx);
	      else
		/* Substitute to figure out the base class.  */
		base = tsubst (BINFO_TYPE (pbase_binfo), args, tf_error,
			       NULL_TREE);

	      if (base == error_mark_node)
		continue;

	      base_list = tree_cons (access, base, base_list);
	      if (BINFO_VIRTUAL_P (pbase_binfo))
		TREE_TYPE (base_list) = integer_type_node;
	    }
	}

      /* The list is now in reverse order; correct that.  */
      base_list = nreverse (base_list);

      if (pushed_scope)
	pop_scope (pushed_scope);
    }
  /* Now call xref_basetypes to set up all the base-class
     information.  */
  xref_basetypes (type, base_list);

  apply_late_template_attributes (&type, TYPE_ATTRIBUTES (pattern),
				  (int) ATTR_FLAG_TYPE_IN_PLACE,
				  args, tf_error, NULL_TREE);
  fixup_attribute_variants (type);

  /* Now that our base classes are set up, enter the scope of the
     class, so that name lookups into base classes, etc. will work
     correctly.  This is precisely analogous to what we do in
     begin_class_definition when defining an ordinary non-template
     class, except we also need to push the enclosing classes.  */
  push_nested_class (type);

  /* Now members are processed in the order of declaration.  */
  for (member = CLASSTYPE_DECL_LIST (pattern);
       member; member = TREE_CHAIN (member))
    {
      tree t = TREE_VALUE (member);

      if (TREE_PURPOSE (member))
	{
	  if (TYPE_P (t))
	    {
	      if (LAMBDA_TYPE_P (t))
		/* A closure type for a lambda in an NSDMI or default
		   argument.  Ignore it; it will be regenerated when
		   needed.  */
		continue;

	      /* Build new CLASSTYPE_NESTED_UTDS.  */
	      bool class_template_p = (TREE_CODE (t) != ENUMERAL_TYPE
				       && TYPE_LANG_SPECIFIC (t)
				       && CLASSTYPE_IS_TEMPLATE (t));
	      /* If the member is a class template, then -- even after
		 substitution -- there may be dependent types in the
		 template argument list for the class.  We increment
		 PROCESSING_TEMPLATE_DECL so that dependent_type_p works
		 as intended, as that function will assume that no types
		 are dependent when outside of a template.  */
	      if (class_template_p)
		++processing_template_decl;
	      tree newtag = tsubst (t, args, tf_error, NULL_TREE);
	      if (class_template_p)
		--processing_template_decl;
	      if (newtag == error_mark_node)
		continue;

	      if (TREE_CODE (newtag) != ENUMERAL_TYPE)
		{
		  tree name = TYPE_IDENTIFIER (t);

		  if (class_template_p)
		    /* Unfortunately, lookup_template_class sets
		       CLASSTYPE_IMPLICIT_INSTANTIATION for a partial
		       instantiation (i.e., for the type of a member
		       template class nested within a template class.)
		       This behavior is required for
		       maybe_process_partial_specialization to work
		       correctly, but is not accurate in this case;
		       the TAG is not an instantiation of anything.
		       (The corresponding TEMPLATE_DECL is an
		       instantiation, but the TYPE is not.) */
		    CLASSTYPE_USE_TEMPLATE (newtag) = 0;

		  /* Now, we call pushtag to put this NEWTAG into the scope of
		     TYPE.  We first set up the IDENTIFIER_TYPE_VALUE to avoid
		     pushtag calling push_template_decl.  We don't have to do
		     this for enums because it will already have been done in
		     tsubst_enum.  */
		  if (name)
		    SET_IDENTIFIER_TYPE_VALUE (name, newtag);
		  pushtag (name, newtag);
		}
	    }
	  else if (DECL_DECLARES_FUNCTION_P (t))
	    {
	      tree r;

	      if (TREE_CODE (t) == TEMPLATE_DECL)
		++processing_template_decl;
	      r = tsubst (t, args, tf_error, NULL_TREE);
	      if (TREE_CODE (t) == TEMPLATE_DECL)
		--processing_template_decl;
	      set_current_access_from_decl (r);
	      finish_member_declaration (r);
	      /* Instantiate members marked with attribute used.  */
	      if (r != error_mark_node && DECL_PRESERVE_P (r))
		mark_used (r);
	      if (TREE_CODE (r) == FUNCTION_DECL
		  && DECL_OMP_DECLARE_REDUCTION_P (r))
		cp_check_omp_declare_reduction (r);
	    }
	  else if ((DECL_CLASS_TEMPLATE_P (t) || DECL_IMPLICIT_TYPEDEF_P (t))
		   && LAMBDA_TYPE_P (TREE_TYPE (t)))
	    /* A closure type for a lambda in an NSDMI or default argument.
	       Ignore it; it will be regenerated when needed.  */;
	  else
	    {
	      /* Build new TYPE_FIELDS.  */
	      if (TREE_CODE (t) == STATIC_ASSERT)
		tsubst_expr (t, args, tf_warning_or_error, NULL_TREE,
			     /*integral_constant_expression_p=*/true);
	      else if (TREE_CODE (t) != CONST_DECL)
		{
		  tree r;
		  tree vec = NULL_TREE;
		  int len = 1;

		  gcc_checking_assert (TREE_CODE (t) != CONST_DECL);
		  /* The file and line for this declaration, to
		     assist in error message reporting.  Since we
		     called push_tinst_level above, we don't need to
		     restore these.  */
		  input_location = DECL_SOURCE_LOCATION (t);

		  if (TREE_CODE (t) == TEMPLATE_DECL)
		    ++processing_template_decl;
		  r = tsubst (t, args, tf_warning_or_error, NULL_TREE);
		  if (TREE_CODE (t) == TEMPLATE_DECL)
		    --processing_template_decl;

		  if (TREE_CODE (r) == TREE_VEC)
		    {
		      /* A capture pack became multiple fields.  */
		      vec = r;
		      len = TREE_VEC_LENGTH (vec);
		    }

		  for (int i = 0; i < len; ++i)
		    {
		      if (vec)
			r = TREE_VEC_ELT (vec, i);

		      if (VAR_P (r))
			{
			  /* In [temp.inst]:

			       [t]he initialization (and any associated
			       side-effects) of a static data member does
			       not occur unless the static data member is
			       itself used in a way that requires the
			       definition of the static data member to
			       exist.

			     Therefore, we do not substitute into the
			     initializer for the static data member
			     here.  */
			  finish_static_data_member_decl
			    (r,
			     /*init=*/NULL_TREE,
			     /*init_const_expr_p=*/false,
			     /*asmspec_tree=*/NULL_TREE,
			     /*flags=*/0);
			  /* Instantiate members marked with attribute
			     used.  */
			  if (r != error_mark_node && DECL_PRESERVE_P (r))
			    mark_used (r);
			}
		      else if (TREE_CODE (r) == FIELD_DECL)
			{
			  /* Determine whether R has a valid type and can
			     be completed later.  If R is invalid, then
			     its type is replaced by error_mark_node.  */
			  tree rtype = TREE_TYPE (r);
			  if (can_complete_type_without_circularity (rtype))
			    complete_type (rtype);

			  if (!complete_or_array_type_p (rtype))
			    {
			      /* If R's type couldn't be completed and
				 it isn't a flexible array member (whose
				 type is incomplete by definition) give
				 an error.  */
			      cxx_incomplete_type_error (r, rtype);
			      TREE_TYPE (r) = error_mark_node;
			    }
			  else if (TREE_CODE (rtype) == ARRAY_TYPE
				   && TYPE_DOMAIN (rtype) == NULL_TREE
				   && (TREE_CODE (type) == UNION_TYPE
				       || TREE_CODE (type) == QUAL_UNION_TYPE))
			    {
			      error ("flexible array member %qD in union", r);
			      TREE_TYPE (r) = error_mark_node;
			    }
			  else if (!verify_type_context (input_location,
							 TCTX_FIELD, rtype))
			    TREE_TYPE (r) = error_mark_node;
			}

		      /* If it is a TYPE_DECL for a class-scoped
			 ENUMERAL_TYPE, such a thing will already have
			 been added to the field list by tsubst_enum
			 in finish_member_declaration in the
			 CLASSTYPE_NESTED_UTDS case above.  */
		      if (!(TREE_CODE (r) == TYPE_DECL
			    && TREE_CODE (TREE_TYPE (r)) == ENUMERAL_TYPE
			    && DECL_ARTIFICIAL (r)))
			{
			  set_current_access_from_decl (r);
			  finish_member_declaration (r);
			}
		    }
		}
	    }
	}
      else
	{
	  if (TYPE_P (t) || DECL_CLASS_TEMPLATE_P (t)
	      || DECL_TEMPLATE_TEMPLATE_PARM_P (t))
	    {
	      /* Build new CLASSTYPE_FRIEND_CLASSES.  */

	      tree friend_type = t;
	      bool adjust_processing_template_decl = false;

	      if (TREE_CODE (friend_type) == TEMPLATE_DECL)
		{
		  /* template <class T> friend class C;  */
		  friend_type = tsubst_friend_class (friend_type, args);
		  adjust_processing_template_decl = true;
		}
	      else if (TREE_CODE (friend_type) == UNBOUND_CLASS_TEMPLATE)
		{
		  /* template <class T> friend class C::D;  */
		  friend_type = tsubst (friend_type, args,
					tf_warning_or_error, NULL_TREE);
		  if (TREE_CODE (friend_type) == TEMPLATE_DECL)
		    friend_type = TREE_TYPE (friend_type);
		  adjust_processing_template_decl = true;
		}
	      else if (TREE_CODE (friend_type) == TYPENAME_TYPE
		       || TREE_CODE (friend_type) == TEMPLATE_TYPE_PARM)
		{
		  /* This could be either

		       friend class T::C;

		     when dependent_type_p is false or

		       template <class U> friend class T::C;

		     otherwise.  */
		  /* Bump processing_template_decl in case this is
		     something like template <class T> friend struct
		     A<T>::B.  */
		  ++processing_template_decl;
		  friend_type = tsubst (friend_type, args,
					tf_warning_or_error, NULL_TREE);
		  if (dependent_type_p (friend_type))
		    adjust_processing_template_decl = true;
		  --processing_template_decl;
		}
	      else if (uses_template_parms (friend_type))
		/* friend class C<T>;  */
		friend_type = tsubst (friend_type, args,
				      tf_warning_or_error, NULL_TREE);
	      /* Otherwise it's

		   friend class C;

		 where C is already declared or

		   friend class C<int>;

		 We don't have to do anything in these cases.  */

	      if (adjust_processing_template_decl)
		/* Trick make_friend_class into realizing that the friend
		   we're adding is a template, not an ordinary class.  It's
		   important that we use make_friend_class since it will
		   perform some error-checking and output cross-reference
		   information.  */
		++processing_template_decl;

	      if (friend_type != error_mark_node)
		make_friend_class (type, friend_type, /*complain=*/false);

	      if (adjust_processing_template_decl)
		--processing_template_decl;
	    }
	  else
	    {
	      /* Build new DECL_FRIENDLIST.  */
	      tree r;

	      /* The file and line for this declaration, to
		 assist in error message reporting.  Since we
		 called push_tinst_level above, we don't need to
		 restore these.  */
	      input_location = DECL_SOURCE_LOCATION (t);

	      if (TREE_CODE (t) == TEMPLATE_DECL)
		{
		  ++processing_template_decl;
		  push_deferring_access_checks (dk_no_check);
		}

	      r = tsubst_friend_function (t, args);
	      add_friend (type, r, /*complain=*/false);
	      if (TREE_CODE (t) == TEMPLATE_DECL)
		{
		  pop_deferring_access_checks ();
		  --processing_template_decl;
		}
	    }
	}
    }

  if (fn_context)
    {
      /* Restore these before substituting into the lambda capture
	 initializers.  */
      cp_unevaluated_operand = saved_unevaluated_operand;
      c_inhibit_evaluation_warnings
	= saved_inhibit_evaluation_warnings;
    }

  /* Set the file and line number information to whatever is given for
     the class itself.  This puts error messages involving generated
     implicit functions at a predictable point, and the same point
     that would be used for non-template classes.  */
  input_location = DECL_SOURCE_LOCATION (typedecl);

  unreverse_member_declarations (type);
  finish_struct_1 (type);
  TYPE_BEING_DEFINED (type) = 0;

  /* We don't instantiate default arguments for member functions.  14.7.1:

     The implicit instantiation of a class template specialization causes
     the implicit instantiation of the declarations, but not of the
     definitions or default arguments, of the class member functions,
     member classes, static data members and member templates....  */

  perform_instantiation_time_access_checks (pattern, args);
  perform_deferred_access_checks (tf_warning_or_error);
  pop_nested_class ();
  maximum_field_alignment = saved_maximum_field_alignment;
  if (!fn_context)
    pop_from_top_level ();
  pop_tinst_level ();

  /* The vtable for a template class can be emitted in any translation
     unit in which the class is instantiated.  When there is no key
     method, however, finish_struct_1 will already have added TYPE to
     the keyed_classes.  */
  if (TYPE_CONTAINS_VPTR_P (type) && CLASSTYPE_KEY_METHOD (type))
    vec_safe_push (keyed_classes, type);

  return type;
}

/* Wrapper for instantiate_class_template_1.
*/

tree
instantiate_class_template (tree type)
{
  tree ret;
  /* Account instantiation time under the template-instantiation
     timevar.  */
  timevar_push (TV_TEMPLATE_INST);
  ret = instantiate_class_template_1 (type);
  timevar_pop (TV_TEMPLATE_INST);
  return ret;
}

/* Substitute ARGS into template argument T, which may be a type or an
   expression; expressions are treated as constant expressions.  */

tree
tsubst_template_arg (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  tree r;

  if (!t)
    r = t;
  else if (TYPE_P (t))
    r = tsubst (t, args, complain, in_decl);
  else
    {
      if (!(complain & tf_warning))
	++c_inhibit_evaluation_warnings;
      r = tsubst_expr (t, args, complain, in_decl,
		       /*integral_constant_expression_p=*/true);
      if (!(complain & tf_warning))
	--c_inhibit_evaluation_warnings;
    }

  return r;
}

/* Given a function parameter pack TMPL_PARM and some function parameters
   instantiated from it at *SPEC_P, return a NONTYPE_ARGUMENT_PACK of them
   and set *SPEC_P to point at the next point in the list.  */

tree
extract_fnparm_pack (tree tmpl_parm, tree *spec_p)
{
  /* Collect all of the extra "packed" parameters into an
     argument pack.  */
  tree parmvec;
  tree argpack = make_node (NONTYPE_ARGUMENT_PACK);
  tree spec_parm = *spec_p;
  int i, len;

  /* Count how many leading parameters were expanded from TMPL_PARM.
     A null TMPL_PARM (see make_fnparm_pack) packs the whole chain.  */
  for (len = 0; spec_parm; ++len, spec_parm = TREE_CHAIN (spec_parm))
    if (tmpl_parm
	&& !function_parameter_expanded_from_pack_p (spec_parm, tmpl_parm))
      break;

  /* Fill in PARMVEC and PARMTYPEVEC with all of the parameters.  */
  parmvec = make_tree_vec (len);
  spec_parm = *spec_p;
  for (i = 0; i < len; i++, spec_parm = DECL_CHAIN (spec_parm))
    {
      tree elt = spec_parm;
      if (DECL_PACK_P (elt))
	elt = make_pack_expansion (elt);
      TREE_VEC_ELT (parmvec, i) = elt;
    }

  /* Build the argument packs.  */
  SET_ARGUMENT_PACK_ARGS (argpack, parmvec);
  *spec_p = spec_parm;

  return argpack;
}

/* Given a chain SPEC_PARM of PARM_DECLs, pack them into a
   NONTYPE_ARGUMENT_PACK.  */

static tree
make_fnparm_pack (tree spec_parm)
{
  return extract_fnparm_pack (NULL_TREE, &spec_parm);
}

/* Return 1 if the Ith element of the argument pack ARG_PACK is a
   pack expansion with no extra args, 2 if it has extra args, or 0
   if it is not a pack expansion.  */

static int
argument_pack_element_is_expansion_p (tree arg_pack, int i)
{
  if (TREE_CODE (arg_pack) == ARGUMENT_PACK_SELECT)
    /* We're being called before this happens in tsubst_pack_expansion.  */
    arg_pack = ARGUMENT_PACK_SELECT_FROM_PACK (arg_pack);
  tree vec = ARGUMENT_PACK_ARGS (arg_pack);
  if (i >= TREE_VEC_LENGTH (vec))
    return 0;
  tree elt = TREE_VEC_ELT (vec, i);
  if (DECL_P (elt))
    /* A decl pack is itself an expansion.  */
    elt = TREE_TYPE (elt);
  if (!PACK_EXPANSION_P (elt))
    return 0;
  if (PACK_EXPANSION_EXTRA_ARGS (elt))
    return 2;
  return 1;
}

/* Create and return an ARGUMENT_PACK_SELECT tree node.  */

static tree
make_argument_pack_select (tree arg_pack, unsigned index)
{
  tree aps = make_node (ARGUMENT_PACK_SELECT);

  ARGUMENT_PACK_SELECT_FROM_PACK (aps) = arg_pack;
  ARGUMENT_PACK_SELECT_INDEX (aps) = index;

  return aps;
}

/* This is a subroutine of tsubst_pack_expansion.

   It returns TRUE if we need to use the PACK_EXPANSION_EXTRA_ARGS
   mechanism to store the (non complete list of) arguments of the
   substitution and return a non substituted pack expansion, in order
   to wait for when we have enough arguments to really perform the
   substitution.  */

static bool
use_pack_expansion_extra_args_p (tree parm_packs,
				 int arg_pack_len,
				 bool has_empty_arg)
{
  /* If one pack has an expansion and another pack has a normal
     argument or if one pack has an empty argument and another
     one hasn't then tsubst_pack_expansion cannot perform the
     substitution and needs to fall back on the
     PACK_EXPANSION_EXTRA mechanism.  */
  if (parm_packs == NULL_TREE)
    return false;
  else if (has_empty_arg)
    {
      /* If all the actual packs are pack expansions, we can still
	 substitute directly.  */
      for (tree p = parm_packs; p; p = TREE_CHAIN (p))
	{
	  tree a = TREE_VALUE (p);
	  if (TREE_CODE (a) == ARGUMENT_PACK_SELECT)
	    a = ARGUMENT_PACK_SELECT_FROM_PACK (a);
	  a = ARGUMENT_PACK_ARGS (a);
	  if (TREE_VEC_LENGTH (a) == 1)
	    a = TREE_VEC_ELT (a, 0);
	  if (PACK_EXPANSION_P (a))
	    continue;
	  return true;
	}
      return false;
    }

  bool has_expansion_arg = false;
  for (int i = 0 ; i < arg_pack_len; ++i)
    {
      bool has_non_expansion_arg = false;
      for (tree parm_pack = parm_packs;
	   parm_pack;
	   parm_pack = TREE_CHAIN (parm_pack))
	{
	  tree arg = TREE_VALUE (parm_pack);

	  int exp = argument_pack_element_is_expansion_p (arg, i);
	  if (exp == 2)
	    /* We can't substitute a pack expansion with extra args into
	       our pattern.  */
	    return true;
	  else if (exp)
	    has_expansion_arg = true;
	  else
	    has_non_expansion_arg = true;
	}

      /* A mix of expansion and non-expansion elements at the same
	 index means we cannot substitute now.  */
      if (has_expansion_arg && has_non_expansion_arg)
	return true;
    }
  return false;
}

/* [temp.variadic]/6 says that:

       The instantiation of a pack expansion [...]
       produces a list E1,E2, ..., En, where N is the number of elements
       in the pack expansion parameters.

   This subroutine of tsubst_pack_expansion produces one of these Ei.

   PATTERN is the pattern of the pack expansion.  PARM_PACKS is a
   TREE_LIST in which each TREE_PURPOSE is a parameter pack of
   PATTERN, and each TREE_VALUE is its corresponding argument pack.
   INDEX is the index 'i' of the element Ei to produce.  ARGS,
   COMPLAIN, and IN_DECL are the same parameters as for the
   tsubst_pack_expansion function.

   The function returns the resulting Ei upon successful completion,
   or error_mark_node.

   Note that this function possibly modifies the ARGS parameter, so
   it's the responsibility of the caller to restore it.  */

static tree
gen_elem_of_pack_expansion_instantiation (tree pattern,
					  tree parm_packs,
					  unsigned index,
					  tree args /* This parm gets
						       modified.  */,
					  tsubst_flags_t complain,
					  tree in_decl)
{
  tree t;
  bool ith_elem_is_expansion = false;

  /* For each parameter pack, change the substitution of the parameter
     pack to the ith argument in its argument pack, then expand the
     pattern.  */
  for (tree pack = parm_packs; pack; pack = TREE_CHAIN (pack))
    {
      tree parm = TREE_PURPOSE (pack);
      tree arg_pack = TREE_VALUE (pack);
      tree aps;			/* instance of ARGUMENT_PACK_SELECT.  */

      ith_elem_is_expansion |=
	argument_pack_element_is_expansion_p (arg_pack, index);

      /* Select the Ith argument from the pack.  */
      if (TREE_CODE (parm) == PARM_DECL
	  || VAR_P (parm)
	  || TREE_CODE (parm) == FIELD_DECL)
	{
	  if (index == 0)
	    {
	      aps = make_argument_pack_select (arg_pack, index);
	      if (!mark_used (parm, complain) && !(complain & tf_error))
		return error_mark_node;
	      register_local_specialization (aps, parm);
	    }
	  else
	    aps = retrieve_local_specialization (parm);
	}
      else
	{
	  int idx, level;
	  template_parm_level_and_index (parm, &level, &idx);

	  if (index == 0)
	    {
	      aps = make_argument_pack_select (arg_pack, index);
	      /* Update the corresponding argument.  */
	      TMPL_ARG (args, level, idx) = aps;
	    }
	  else
	    /* Re-use the ARGUMENT_PACK_SELECT.  */
	    aps = TMPL_ARG (args, level, idx);
	}
      ARGUMENT_PACK_SELECT_INDEX (aps) = index;
    }

  /* Substitute into the PATTERN with the (possibly altered)
     arguments.  */
  if (pattern == in_decl)
    /* Expanding a fixed parameter pack from
       coerce_template_parameter_pack.  */
    t = tsubst_decl (pattern, args, complain);
  else if (pattern == error_mark_node)
    t = error_mark_node;
  else if (!TYPE_P (pattern))
    t = tsubst_expr (pattern, args, complain, in_decl,
		     /*integral_constant_expression_p=*/false);
  else
    t = tsubst (pattern, args, complain, in_decl);

  /* If the Ith argument pack element is a pack expansion, then
     the Ith element resulting from the substituting is going to
     be a pack expansion as well.  */
  if (ith_elem_is_expansion)
    t = make_pack_expansion (t, complain);

  return t;
}

/* When the unexpanded parameter pack in a fold expression expands to an empty
   sequence, the value of the expression is as follows; the program is
   ill-formed if the operator is not listed in this table.

     &&	true
     ||	false
     ,	void()  */

tree
expand_empty_fold (tree t, tsubst_flags_t complain)
{
  tree_code code = (tree_code)TREE_INT_CST_LOW (TREE_OPERAND (t, 0));
  if (!FOLD_EXPR_MODIFY_P (t))
    switch (code)
      {
      case TRUTH_ANDIF_EXPR:
	return boolean_true_node;
      case TRUTH_ORIF_EXPR:
	return boolean_false_node;
      case COMPOUND_EXPR:
	return void_node;
      default:
	break;
      }

  if (complain & tf_error)
    error_at (location_of (t),
	      "fold of empty expansion over %O", code);
  return error_mark_node;
}

/* Given a fold-expression T and a current LEFT and RIGHT operand,
   form an expression that combines the two terms using the
   operator of T.  */

static tree
fold_expression (tree t, tree left, tree right, tsubst_flags_t complain)
{
  tree op = FOLD_EXPR_OP (t);
  tree_code code = (tree_code)TREE_INT_CST_LOW (op);

  // Handle compound assignment operators.
  if (FOLD_EXPR_MODIFY_P (t))
    return build_x_modify_expr (input_location, left, code, right, complain);

  /* Suppress -Wparentheses for the generated combination.  */
  warning_sentinel s(warn_parentheses);
  switch (code)
    {
    case COMPOUND_EXPR:
      return build_x_compound_expr (input_location, left, right, complain);
    default:
      return build_x_binary_op (input_location, code,
				left, TREE_CODE (left),
				right, TREE_CODE (right),
				/*overload=*/NULL,
				complain);
    }
}

/* Substitute ARGS into the pack of a fold expression T.  */

static inline tree
tsubst_fold_expr_pack (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  return tsubst_pack_expansion (FOLD_EXPR_PACK (t), args, complain, in_decl);
}

/* Substitute ARGS into the init of a fold expression T.
*/ static inline tree tsubst_fold_expr_init (tree t, tree args, tsubst_flags_t complain, tree in_decl) { return tsubst_expr (FOLD_EXPR_INIT (t), args, complain, in_decl, false); } /* Expand a PACK of arguments into a grouped as left fold. Given a pack containing elements A0, A1, ..., An and an operator @, this builds the expression: ((A0 @ A1) @ A2) ... @ An Note that PACK must not be empty. The operator is defined by the original fold expression T. */ static tree expand_left_fold (tree t, tree pack, tsubst_flags_t complain) { tree left = TREE_VEC_ELT (pack, 0); for (int i = 1; i < TREE_VEC_LENGTH (pack); ++i) { tree right = TREE_VEC_ELT (pack, i); left = fold_expression (t, left, right, complain); } return left; } /* Substitute into a unary left fold expression. */ static tree tsubst_unary_left_fold (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl); if (pack == error_mark_node) return error_mark_node; if (PACK_EXPANSION_P (pack)) { tree r = copy_node (t); FOLD_EXPR_PACK (r) = pack; return r; } if (TREE_VEC_LENGTH (pack) == 0) return expand_empty_fold (t, complain); else return expand_left_fold (t, pack, complain); } /* Substitute into a binary left fold expression. Do ths by building a single (non-empty) vector of argumnts and building the expression from those elements. 
*/
static tree
tsubst_binary_left_fold (tree t, tree args, tsubst_flags_t complain,
			 tree in_decl)
{
  tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (pack == error_mark_node)
    return error_mark_node;
  tree init = tsubst_fold_expr_init (t, args, complain, in_decl);
  if (init == error_mark_node)
    return error_mark_node;

  /* Partial substitution: keep the fold expression, with the newly
     substituted pack and initializer.  */
  if (PACK_EXPANSION_P (pack))
    {
      tree r = copy_node (t);
      FOLD_EXPR_PACK (r) = pack;
      FOLD_EXPR_INIT (r) = init;
      return r;
    }

  /* Prepend the initializer so (init @ ... @ pack) reduces to a
     unary left fold over [init, pack...].  */
  tree vec = make_tree_vec (TREE_VEC_LENGTH (pack) + 1);
  TREE_VEC_ELT (vec, 0) = init;
  for (int i = 0; i < TREE_VEC_LENGTH (pack); ++i)
    TREE_VEC_ELT (vec, i + 1) = TREE_VEC_ELT (pack, i);

  return expand_left_fold (t, vec, complain);
}

/* Expand a PACK of arguments into a grouped right fold.  Given a pack
   containing elements A0, A1, ..., An and an operator @, this builds
   the expression:

      A0 @ ... (An-2 @ (An-1 @ An))

   Note that PACK must not be empty.

   The operator is defined by the original fold expression T.  */

tree
expand_right_fold (tree t, tree pack, tsubst_flags_t complain)
{
  // Build the expression.
  int n = TREE_VEC_LENGTH (pack);
  tree right = TREE_VEC_ELT (pack, n - 1);
  /* Fold right-to-left, combining each preceding element with the
     accumulated RIGHT.  */
  for (--n; n != 0; --n)
    {
      tree left = TREE_VEC_ELT (pack, n - 1);
      right = fold_expression (t, left, right, complain);
    }
  return right;
}

/* Substitute into a unary right fold expression.  */

static tree
tsubst_unary_right_fold (tree t, tree args, tsubst_flags_t complain,
			 tree in_decl)
{
  tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl);
  if (pack == error_mark_node)
    return error_mark_node;
  /* Partial substitution: keep the fold expression with the new pack.  */
  if (PACK_EXPANSION_P (pack))
    {
      tree r = copy_node (t);
      FOLD_EXPR_PACK (r) = pack;
      return r;
    }
  if (TREE_VEC_LENGTH (pack) == 0)
    return expand_empty_fold (t, complain);
  else
    return expand_right_fold (t, pack, complain);
}

/* Substitute into a binary right fold expression.  Do this by building a
   single (non-empty) vector of arguments and building the expression
   from those elements.
*/ static tree tsubst_binary_right_fold (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pack = tsubst_fold_expr_pack (t, args, complain, in_decl); if (pack == error_mark_node) return error_mark_node; tree init = tsubst_fold_expr_init (t, args, complain, in_decl); if (init == error_mark_node) return error_mark_node; if (PACK_EXPANSION_P (pack)) { tree r = copy_node (t); FOLD_EXPR_PACK (r) = pack; FOLD_EXPR_INIT (r) = init; return r; } int n = TREE_VEC_LENGTH (pack); tree vec = make_tree_vec (n + 1); for (int i = 0; i < n; ++i) TREE_VEC_ELT (vec, i) = TREE_VEC_ELT (pack, i); TREE_VEC_ELT (vec, n) = init; return expand_right_fold (t, vec, complain); } /* Walk through the pattern of a pack expansion, adding everything in local_specializations to a list. */ class el_data { public: hash_set<tree> internal; tree extra; tsubst_flags_t complain; el_data (tsubst_flags_t c) : extra (NULL_TREE), complain (c) {} }; static tree extract_locals_r (tree *tp, int */*walk_subtrees*/, void *data_) { el_data &data = *reinterpret_cast<el_data*>(data_); tree *extra = &data.extra; tsubst_flags_t complain = data.complain; if (TYPE_P (*tp) && typedef_variant_p (*tp)) /* Remember local typedefs (85214). */ tp = &TYPE_NAME (*tp); if (TREE_CODE (*tp) == DECL_EXPR) data.internal.add (DECL_EXPR_DECL (*tp)); else if (tree spec = retrieve_local_specialization (*tp)) { if (data.internal.contains (*tp)) /* Don't mess with variables declared within the pattern. */ return NULL_TREE; if (TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK) { /* Maybe pull out the PARM_DECL for a partial instantiation. */ tree args = ARGUMENT_PACK_ARGS (spec); if (TREE_VEC_LENGTH (args) == 1) { tree elt = TREE_VEC_ELT (args, 0); if (PACK_EXPANSION_P (elt)) elt = PACK_EXPANSION_PATTERN (elt); if (DECL_PACK_P (elt)) spec = elt; } if (TREE_CODE (spec) == NONTYPE_ARGUMENT_PACK) { /* Handle lambda capture here, since we aren't doing any substitution now, and so tsubst_copy won't call process_outer_var_ref. 
*/ tree args = ARGUMENT_PACK_ARGS (spec); int len = TREE_VEC_LENGTH (args); for (int i = 0; i < len; ++i) { tree arg = TREE_VEC_ELT (args, i); tree carg = arg; if (outer_automatic_var_p (arg)) carg = process_outer_var_ref (arg, complain); if (carg != arg) { /* Make a new NONTYPE_ARGUMENT_PACK of the capture proxies. */ if (i == 0) { spec = copy_node (spec); args = copy_node (args); SET_ARGUMENT_PACK_ARGS (spec, args); register_local_specialization (spec, *tp); } TREE_VEC_ELT (args, i) = carg; } } } } if (outer_automatic_var_p (spec)) spec = process_outer_var_ref (spec, complain); *extra = tree_cons (*tp, spec, *extra); } return NULL_TREE; } static tree extract_local_specs (tree pattern, tsubst_flags_t complain) { el_data data (complain); cp_walk_tree_without_duplicates (&pattern, extract_locals_r, &data); return data.extra; } /* Extract any uses of local_specializations from PATTERN and add them to ARGS for use in PACK_EXPANSION_EXTRA_ARGS. */ tree build_extra_args (tree pattern, tree args, tsubst_flags_t complain) { tree extra = args; if (local_specializations) if (tree locals = extract_local_specs (pattern, complain)) extra = tree_cons (NULL_TREE, extra, locals); return extra; } /* Apply any local specializations from PACK_EXPANSION_EXTRA_ARGS and add the normal template args to ARGS. */ tree add_extra_args (tree extra, tree args) { if (extra && TREE_CODE (extra) == TREE_LIST) { for (tree elt = TREE_CHAIN (extra); elt; elt = TREE_CHAIN (elt)) { /* The partial instantiation involved local declarations collected in extract_local_specs; map from the general template to our local context. */ tree gen = TREE_PURPOSE (elt); tree inst = TREE_VALUE (elt); if (DECL_P (inst)) if (tree local = retrieve_local_specialization (inst)) inst = local; /* else inst is already a full instantiation of the pack. 
*/ register_local_specialization (inst, gen); } gcc_assert (!TREE_PURPOSE (extra)); extra = TREE_VALUE (extra); } #if 1 /* I think we should always be able to substitute dependent args into the pattern. If that turns out to be incorrect in some cases, enable the alternate code (and add complain/in_decl parms to this function). */ gcc_checking_assert (!uses_template_parms (extra)); #else if (!uses_template_parms (extra)) { gcc_unreachable (); extra = tsubst_template_args (extra, args, complain, in_decl); args = add_outermost_template_args (args, extra); } else #endif args = add_to_template_args (extra, args); return args; } /* Substitute ARGS into T, which is an pack expansion (i.e. TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION). Returns a TREE_VEC with the substituted arguments, a PACK_EXPANSION_* node (if only a partial substitution could be performed) or ERROR_MARK_NODE if there was an error. */ tree tsubst_pack_expansion (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree pattern; tree pack, packs = NULL_TREE; bool unsubstituted_packs = false; int i, len = -1; tree result; bool need_local_specializations = false; int levels; gcc_assert (PACK_EXPANSION_P (t)); pattern = PACK_EXPANSION_PATTERN (t); /* Add in any args remembered from an earlier partial instantiation. */ args = add_extra_args (PACK_EXPANSION_EXTRA_ARGS (t), args); levels = TMPL_ARGS_DEPTH (args); /* Determine the argument packs that will instantiate the parameter packs used in the expansion expression. While we're at it, compute the number of arguments to be expanded and make sure it is consistent. 
*/ for (pack = PACK_EXPANSION_PARAMETER_PACKS (t); pack; pack = TREE_CHAIN (pack)) { tree parm_pack = TREE_VALUE (pack); tree arg_pack = NULL_TREE; tree orig_arg = NULL_TREE; int level = 0; if (TREE_CODE (parm_pack) == BASES) { gcc_assert (parm_pack == pattern); if (BASES_DIRECT (parm_pack)) return calculate_direct_bases (tsubst_expr (BASES_TYPE (parm_pack), args, complain, in_decl, false), complain); else return calculate_bases (tsubst_expr (BASES_TYPE (parm_pack), args, complain, in_decl, false), complain); } else if (builtin_pack_call_p (parm_pack)) { if (parm_pack != pattern) { if (complain & tf_error) sorry ("%qE is not the entire pattern of the pack expansion", parm_pack); return error_mark_node; } return expand_builtin_pack_call (parm_pack, args, complain, in_decl); } else if (TREE_CODE (parm_pack) == PARM_DECL) { /* We know we have correct local_specializations if this expansion is at function scope, or if we're dealing with a local parameter in a requires expression; for the latter, tsubst_requires_expr set it up appropriately. */ if (PACK_EXPANSION_LOCAL_P (t) || CONSTRAINT_VAR_P (parm_pack)) arg_pack = retrieve_local_specialization (parm_pack); else /* We can't rely on local_specializations for a parameter name used later in a function declaration (such as in a late-specified return type). Even if it exists, it might have the wrong value for a recursive call. */ need_local_specializations = true; if (!arg_pack) { /* This parameter pack was used in an unevaluated context. Just make a dummy decl, since it's only used for its type. */ ++cp_unevaluated_operand; arg_pack = tsubst_decl (parm_pack, args, complain); --cp_unevaluated_operand; if (arg_pack && DECL_PACK_P (arg_pack)) /* Partial instantiation of the parm_pack, we can't build up an argument pack yet. */ arg_pack = NULL_TREE; else arg_pack = make_fnparm_pack (arg_pack); } else if (DECL_PACK_P (arg_pack)) /* This argument pack isn't fully instantiated yet. 
*/ arg_pack = NULL_TREE; } else if (is_capture_proxy (parm_pack)) { arg_pack = retrieve_local_specialization (parm_pack); if (DECL_PACK_P (arg_pack)) arg_pack = NULL_TREE; } else { int idx; template_parm_level_and_index (parm_pack, &level, &idx); if (level <= levels) arg_pack = TMPL_ARG (args, level, idx); if (arg_pack && TREE_CODE (arg_pack) == TEMPLATE_TYPE_PARM && TEMPLATE_TYPE_PARAMETER_PACK (arg_pack)) arg_pack = NULL_TREE; } orig_arg = arg_pack; if (arg_pack && TREE_CODE (arg_pack) == ARGUMENT_PACK_SELECT) arg_pack = ARGUMENT_PACK_SELECT_FROM_PACK (arg_pack); if (arg_pack && !ARGUMENT_PACK_P (arg_pack)) /* This can only happen if we forget to expand an argument pack somewhere else. Just return an error, silently. */ { result = make_tree_vec (1); TREE_VEC_ELT (result, 0) = error_mark_node; return result; } if (arg_pack) { int my_len = TREE_VEC_LENGTH (ARGUMENT_PACK_ARGS (arg_pack)); /* Don't bother trying to do a partial substitution with incomplete packs; we'll try again after deduction. */ if (ARGUMENT_PACK_INCOMPLETE_P (arg_pack)) return t; if (len < 0) len = my_len; else if (len != my_len) { if (!(complain & tf_error)) /* Fail quietly. */; else if (TREE_CODE (t) == TYPE_PACK_EXPANSION) error ("mismatched argument pack lengths while expanding %qT", pattern); else error ("mismatched argument pack lengths while expanding %qE", pattern); return error_mark_node; } /* Keep track of the parameter packs and their corresponding argument packs. */ packs = tree_cons (parm_pack, arg_pack, packs); TREE_TYPE (packs) = orig_arg; } else { /* We can't substitute for this parameter pack. We use a flag as well as the missing_level counter because function parameter packs don't have a level. */ gcc_assert (processing_template_decl || is_auto (parm_pack)); unsubstituted_packs = true; } } /* If the expansion is just T..., return the matching argument pack, unless we need to call convert_from_reference on all the elements. This is an important optimization; see c++/68422. 
*/ if (!unsubstituted_packs && TREE_PURPOSE (packs) == pattern) { tree args = ARGUMENT_PACK_ARGS (TREE_VALUE (packs)); /* If the argument pack is a single pack expansion, pull it out. */ if (TREE_VEC_LENGTH (args) == 1 && pack_expansion_args_count (args)) return TREE_VEC_ELT (args, 0); /* Types need no adjustment, nor does sizeof..., and if we still have some pack expansion args we won't do anything yet. */ if (TREE_CODE (t) == TYPE_PACK_EXPANSION || PACK_EXPANSION_SIZEOF_P (t) || pack_expansion_args_count (args)) return args; /* Also optimize expression pack expansions if we can tell that the elements won't have reference type. */ tree type = TREE_TYPE (pattern); if (type && !TYPE_REF_P (type) && !PACK_EXPANSION_P (type) && !WILDCARD_TYPE_P (type)) return args; /* Otherwise use the normal path so we get convert_from_reference. */ } /* We cannot expand this expansion expression, because we don't have all of the argument packs we need. */ if (use_pack_expansion_extra_args_p (packs, len, unsubstituted_packs)) { /* We got some full packs, but we can't substitute them in until we have values for all the packs. So remember these until then. */ t = make_pack_expansion (pattern, complain); PACK_EXPANSION_EXTRA_ARGS (t) = build_extra_args (pattern, args, complain); return t; } /* If NEED_LOCAL_SPECIALIZATIONS then we're in a late-specified return type, so create our own local specializations map; the current map is either NULL or (in the case of recursive unification) might have bindings that we don't want to use or alter. */ local_specialization_stack lss (need_local_specializations ? lss_blank : lss_nop); if (unsubstituted_packs) { /* There were no real arguments, we're just replacing a parameter pack with another version of itself. Substitute into the pattern and return a PACK_EXPANSION_*. The caller will need to deal with that. 
*/ if (TREE_CODE (t) == EXPR_PACK_EXPANSION) t = tsubst_expr (pattern, args, complain, in_decl, /*integral_constant_expression_p=*/false); else t = tsubst (pattern, args, complain, in_decl); t = make_pack_expansion (t, complain); return t; } gcc_assert (len >= 0); /* For each argument in each argument pack, substitute into the pattern. */ result = make_tree_vec (len); tree elem_args = copy_template_args (args); for (i = 0; i < len; ++i) { t = gen_elem_of_pack_expansion_instantiation (pattern, packs, i, elem_args, complain, in_decl); TREE_VEC_ELT (result, i) = t; if (t == error_mark_node) { result = error_mark_node; break; } } /* Update ARGS to restore the substitution from parameter packs to their argument packs. */ for (pack = packs; pack; pack = TREE_CHAIN (pack)) { tree parm = TREE_PURPOSE (pack); if (TREE_CODE (parm) == PARM_DECL || VAR_P (parm) || TREE_CODE (parm) == FIELD_DECL) register_local_specialization (TREE_TYPE (pack), parm); else { int idx, level; if (TREE_VALUE (pack) == NULL_TREE) continue; template_parm_level_and_index (parm, &level, &idx); /* Update the corresponding argument. */ if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args)) TREE_VEC_ELT (TREE_VEC_ELT (args, level -1 ), idx) = TREE_TYPE (pack); else TREE_VEC_ELT (args, idx) = TREE_TYPE (pack); } } /* If the dependent pack arguments were such that we end up with only a single pack expansion again, there's no need to keep it in a TREE_VEC. */ if (len == 1 && TREE_CODE (result) == TREE_VEC && PACK_EXPANSION_P (TREE_VEC_ELT (result, 0))) return TREE_VEC_ELT (result, 0); return result; } /* Given PARM_DECL PARM, find the corresponding PARM_DECL in the template TMPL. We do this using DECL_PARM_INDEX, which should work even with parameter packs; all parms generated from a function parameter pack will have the same DECL_PARM_INDEX. 
*/ tree get_pattern_parm (tree parm, tree tmpl) { tree pattern = DECL_TEMPLATE_RESULT (tmpl); tree patparm; if (DECL_ARTIFICIAL (parm)) { for (patparm = DECL_ARGUMENTS (pattern); patparm; patparm = DECL_CHAIN (patparm)) if (DECL_ARTIFICIAL (patparm) && DECL_NAME (parm) == DECL_NAME (patparm)) break; } else { patparm = FUNCTION_FIRST_USER_PARM (DECL_TEMPLATE_RESULT (tmpl)); patparm = chain_index (DECL_PARM_INDEX (parm)-1, patparm); gcc_assert (DECL_PARM_INDEX (patparm) == DECL_PARM_INDEX (parm)); } return patparm; } /* Make an argument pack out of the TREE_VEC VEC. */ static tree make_argument_pack (tree vec) { tree pack; if (TYPE_P (TREE_VEC_ELT (vec, 0))) pack = cxx_make_type (TYPE_ARGUMENT_PACK); else { pack = make_node (NONTYPE_ARGUMENT_PACK); TREE_CONSTANT (pack) = 1; } SET_ARGUMENT_PACK_ARGS (pack, vec); return pack; } /* Return an exact copy of template args T that can be modified independently. */ static tree copy_template_args (tree t) { if (t == error_mark_node) return t; int len = TREE_VEC_LENGTH (t); tree new_vec = make_tree_vec (len); for (int i = 0; i < len; ++i) { tree elt = TREE_VEC_ELT (t, i); if (elt && TREE_CODE (elt) == TREE_VEC) elt = copy_template_args (elt); TREE_VEC_ELT (new_vec, i) = elt; } NON_DEFAULT_TEMPLATE_ARGS_COUNT (new_vec) = NON_DEFAULT_TEMPLATE_ARGS_COUNT (t); return new_vec; } /* Substitute ARGS into the *_ARGUMENT_PACK orig_arg. */ tree tsubst_argument_pack (tree orig_arg, tree args, tsubst_flags_t complain, tree in_decl) { /* Substitute into each of the arguments. */ tree new_arg = TYPE_P (orig_arg) ? 
cxx_make_type (TREE_CODE (orig_arg)) : make_node (TREE_CODE (orig_arg)); tree pack_args = tsubst_template_args (ARGUMENT_PACK_ARGS (orig_arg), args, complain, in_decl); if (pack_args == error_mark_node) new_arg = error_mark_node; else SET_ARGUMENT_PACK_ARGS (new_arg, pack_args); if (TREE_CODE (new_arg) == NONTYPE_ARGUMENT_PACK) TREE_CONSTANT (new_arg) = TREE_CONSTANT (orig_arg); return new_arg; } /* Substitute ARGS into the vector or list of template arguments T. */ tree tsubst_template_args (tree t, tree args, tsubst_flags_t complain, tree in_decl) { tree orig_t = t; int len, need_new = 0, i, expanded_len_adjust = 0, out; tree *elts; if (t == error_mark_node) return error_mark_node; len = TREE_VEC_LENGTH (t); elts = XALLOCAVEC (tree, len); for (i = 0; i < len; i++) { tree orig_arg = TREE_VEC_ELT (t, i); tree new_arg; if (TREE_CODE (orig_arg) == TREE_VEC) new_arg = tsubst_template_args (orig_arg, args, complain, in_decl); else if (PACK_EXPANSION_P (orig_arg)) { /* Substitute into an expansion expression. */ new_arg = tsubst_pack_expansion (orig_arg, args, complain, in_decl); if (TREE_CODE (new_arg) == TREE_VEC) /* Add to the expanded length adjustment the number of expanded arguments. We subtract one from this measurement, because the argument pack expression itself is already counted as 1 in LEN. EXPANDED_LEN_ADJUST can actually be negative, if the argument pack is empty. */ expanded_len_adjust += TREE_VEC_LENGTH (new_arg) - 1; } else if (ARGUMENT_PACK_P (orig_arg)) new_arg = tsubst_argument_pack (orig_arg, args, complain, in_decl); else new_arg = tsubst_template_arg (orig_arg, args, complain, in_decl); if (new_arg == error_mark_node) return error_mark_node; elts[i] = new_arg; if (new_arg != orig_arg) need_new = 1; } if (!need_new) return t; /* Make space for the expanded arguments coming from template argument packs. */ t = make_tree_vec (len + expanded_len_adjust); /* ORIG_T can contain TREE_VECs. 
That happens if ORIG_T contains the arguments for a member template. In that case each TREE_VEC in ORIG_T represents a level of template arguments, and ORIG_T won't carry any non defaulted argument count. It will rather be the nested TREE_VECs that will carry one. In other words, ORIG_T carries a non defaulted argument count only if it doesn't contain any nested TREE_VEC. */ if (NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t)) { int count = GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (orig_t); count += expanded_len_adjust; SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (t, count); } for (i = 0, out = 0; i < len; i++) { if ((PACK_EXPANSION_P (TREE_VEC_ELT (orig_t, i)) || ARGUMENT_PACK_P (TREE_VEC_ELT (orig_t, i))) && TREE_CODE (elts[i]) == TREE_VEC) { int idx; /* Now expand the template argument pack "in place". */ for (idx = 0; idx < TREE_VEC_LENGTH (elts[i]); idx++, out++) TREE_VEC_ELT (t, out) = TREE_VEC_ELT (elts[i], idx); } else { TREE_VEC_ELT (t, out) = elts[i]; out++; } } return t; } /* Substitute ARGS into one level PARMS of template parameters. */ static tree tsubst_template_parms_level (tree parms, tree args, tsubst_flags_t complain) { if (parms == error_mark_node) return error_mark_node; tree new_vec = make_tree_vec (TREE_VEC_LENGTH (parms)); for (int i = 0; i < TREE_VEC_LENGTH (new_vec); ++i) { tree tuple = TREE_VEC_ELT (parms, i); if (tuple == error_mark_node) continue; TREE_VEC_ELT (new_vec, i) = tsubst_template_parm (tuple, args, complain); } return new_vec; } /* Return the result of substituting ARGS into the template parameters given by PARMS. If there are m levels of ARGS and m + n levels of PARMS, then the result will contain n levels of PARMS. For example, if PARMS is `template <class T> template <class U> template <T*, U, class V>' and ARGS is {{int}, {double}} then the result will be `template <int*, double, class V>'. 
*/
static tree
tsubst_template_parms (tree parms, tree args, tsubst_flags_t complain)
{
  tree r = NULL_TREE;
  tree* new_parms;

  /* When substituting into a template, we must set
     PROCESSING_TEMPLATE_DECL as the template parameters may be
     dependent if they are based on one-another, and the dependency
     predicates are short-circuit outside of templates.  */
  ++processing_template_decl;

  /* Only the parameter levels deeper than the supplied argument levels
     survive; each surviving level is substituted and re-numbered
     relative to the argument depth.  */
  for (new_parms = &r;
       parms && TMPL_PARMS_DEPTH (parms) > TMPL_ARGS_DEPTH (args);
       new_parms = &(TREE_CHAIN (*new_parms)),
	 parms = TREE_CHAIN (parms))
    {
      tree new_vec = tsubst_template_parms_level (TREE_VALUE (parms),
						  args, complain);
      *new_parms =
	tree_cons (size_int (TMPL_PARMS_DEPTH (parms)
			     - TMPL_ARGS_DEPTH (args)),
		   new_vec, NULL_TREE);
      TEMPLATE_PARMS_CONSTRAINTS (*new_parms)
	= TEMPLATE_PARMS_CONSTRAINTS (parms);
    }

  --processing_template_decl;

  return r;
}

/* Return the result of substituting ARGS into one template parameter
   given by T.  T must be a TREE_LIST whose TREE_VALUE is the template
   parameter and whose TREE_PURPOSE is the default argument of the
   template parameter.  */

static tree
tsubst_template_parm (tree t, tree args, tsubst_flags_t complain)
{
  tree default_value, parm_decl;

  if (args == NULL_TREE
      || t == NULL_TREE
      || t == error_mark_node)
    return t;

  gcc_assert (TREE_CODE (t) == TREE_LIST);

  default_value = TREE_PURPOSE (t);
  parm_decl = TREE_VALUE (t);
  tree constraint = TEMPLATE_PARM_CONSTRAINTS (t);

  parm_decl = tsubst (parm_decl, args, complain, NULL_TREE);
  /* A non-type parameter whose substituted type is invalid for a
     non-type template parameter becomes an error.  */
  if (TREE_CODE (parm_decl) == PARM_DECL
      && invalid_nontype_parm_type_p (TREE_TYPE (parm_decl), complain))
    parm_decl = error_mark_node;
  default_value = tsubst_template_arg (default_value, args,
				       complain, NULL_TREE);
  constraint = tsubst_constraint (constraint, args, complain, NULL_TREE);

  tree r = build_tree_list (default_value, parm_decl);
  TEMPLATE_PARM_CONSTRAINTS (r) = constraint;
  return r;
}

/* Substitute the ARGS into the indicated aggregate (or enumeration)
   type T.
If T is not an aggregate or enumeration type, it is handled as if by tsubst. IN_DECL is as for tsubst. If ENTERING_SCOPE is nonzero, T is the context for a template which we are presently tsubst'ing. Return the substituted value. */ static tree tsubst_aggr_type (tree t, tree args, tsubst_flags_t complain, tree in_decl, int entering_scope) { if (t == NULL_TREE) return NULL_TREE; switch (TREE_CODE (t)) { case RECORD_TYPE: if (TYPE_PTRMEMFUNC_P (t)) return tsubst (TYPE_PTRMEMFUNC_FN_TYPE (t), args, complain, in_decl); /* Fall through. */ case ENUMERAL_TYPE: case UNION_TYPE: if (TYPE_TEMPLATE_INFO (t) && uses_template_parms (t)) { tree argvec; tree context; tree r; /* In "sizeof(X<I>)" we need to evaluate "I". */ cp_evaluated ev; /* First, determine the context for the type we are looking up. */ context = TYPE_CONTEXT (t); if (context && TYPE_P (context)) { context = tsubst_aggr_type (context, args, complain, in_decl, /*entering_scope=*/1); /* If context is a nested class inside a class template, it may still need to be instantiated (c++/33959). */ context = complete_type (context); } /* Then, figure out what arguments are appropriate for the type we are trying to find. For example, given: template <class T> struct S; template <class T, class U> void f(T, U) { S<U> su; } and supposing that we are instantiating f<int, double>, then our ARGS will be {int, double}, but, when looking up S we only want {double}. */ argvec = tsubst_template_args (TYPE_TI_ARGS (t), args, complain, in_decl); if (argvec == error_mark_node) r = error_mark_node; else if (!entering_scope && cxx_dialect >= cxx17 && dependent_scope_p (context)) { /* See maybe_dependent_member_ref. 
*/ tree name = TYPE_IDENTIFIER (t); tree fullname = name; if (instantiates_primary_template_p (t)) fullname = build_nt (TEMPLATE_ID_EXPR, name, INNERMOST_TEMPLATE_ARGS (argvec)); return build_typename_type (context, name, fullname, typename_type); } else { r = lookup_template_class (t, argvec, in_decl, context, entering_scope, complain); r = cp_build_qualified_type_real (r, cp_type_quals (t), complain); } return r; } else /* This is not a template type, so there's nothing to do. */ return t; default: return tsubst (t, args, complain, in_decl); } } static GTY((cache)) decl_tree_cache_map *defarg_inst; /* Substitute into the default argument ARG (a default argument for FN), which has the indicated TYPE. */ tree tsubst_default_argument (tree fn, int parmnum, tree type, tree arg, tsubst_flags_t complain) { int errs = errorcount + sorrycount; /* This can happen in invalid code. */ if (TREE_CODE (arg) == DEFERRED_PARSE) return arg; /* Shortcut {}. */ if (BRACE_ENCLOSED_INITIALIZER_P (arg) && CONSTRUCTOR_NELTS (arg) == 0) return arg; tree parm = FUNCTION_FIRST_USER_PARM (fn); parm = chain_index (parmnum, parm); tree parmtype = TREE_TYPE (parm); if (DECL_BY_REFERENCE (parm)) parmtype = TREE_TYPE (parmtype); if (parmtype == error_mark_node) return error_mark_node; gcc_assert (same_type_ignoring_top_level_qualifiers_p (type, parmtype)); tree *slot; if (defarg_inst && (slot = defarg_inst->get (parm))) return *slot; /* This default argument came from a template. Instantiate the default argument here, not in tsubst. In the case of something like: template <class T> struct S { static T t(); void f(T = t()); }; we must be careful to do name lookup in the scope of S<T>, rather than in the current class. */ push_to_top_level (); push_access_scope (fn); push_deferring_access_checks (dk_no_deferred); start_lambda_scope (parm); /* The default argument expression may cause implicitly defined member functions to be synthesized, which will result in garbage collection. 
We must treat this situation as if we were within the body of function so as to avoid collecting live data on the stack. */ ++function_depth; arg = tsubst_expr (arg, DECL_TI_ARGS (fn), complain, NULL_TREE, /*integral_constant_expression_p=*/false); --function_depth; finish_lambda_scope (); /* Make sure the default argument is reasonable. */ arg = check_default_argument (type, arg, complain); if (errorcount+sorrycount > errs && (complain & tf_warning_or_error)) inform (input_location, " when instantiating default argument for call to %qD", fn); pop_deferring_access_checks (); pop_access_scope (fn); pop_from_top_level (); if (arg != error_mark_node && !cp_unevaluated_operand) { if (!defarg_inst) defarg_inst = decl_tree_cache_map::create_ggc (37); defarg_inst->put (parm, arg); } return arg; } /* Substitute into all the default arguments for FN. */ static void tsubst_default_arguments (tree fn, tsubst_flags_t complain) { tree arg; tree tmpl_args; tmpl_args = DECL_TI_ARGS (fn); /* If this function is not yet instantiated, we certainly don't need its default arguments. */ if (uses_template_parms (tmpl_args)) return; /* Don't do this again for clones. */ if (DECL_CLONED_FUNCTION_P (fn)) return; int i = 0; for (arg = TYPE_ARG_TYPES (TREE_TYPE (fn)); arg; arg = TREE_CHAIN (arg), ++i) if (TREE_PURPOSE (arg)) TREE_PURPOSE (arg) = tsubst_default_argument (fn, i, TREE_VALUE (arg), TREE_PURPOSE (arg), complain); } /* Hash table mapping a FUNCTION_DECL to its dependent explicit-specifier. */ static GTY((cache)) decl_tree_cache_map *explicit_specifier_map; /* Store a pair to EXPLICIT_SPECIFIER_MAP. */ void store_explicit_specifier (tree v, tree t) { if (!explicit_specifier_map) explicit_specifier_map = decl_tree_cache_map::create_ggc (37); DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (v) = true; explicit_specifier_map->put (v, t); } /* Lookup an element in EXPLICIT_SPECIFIER_MAP. 
*/
static tree
lookup_explicit_specifier (tree v)
{
  /* NOTE(review): assumes V was previously stored via
     store_explicit_specifier; get() returning null would crash here.  */
  return *explicit_specifier_map->get (v);
}

/* Given T, a FUNCTION_TYPE or METHOD_TYPE, construct and return a
   corresponding FUNCTION_TYPE or METHOD_TYPE whose return type is
   RETURN_TYPE, argument types are ARG_TYPES, and exception
   specification is RAISES, and otherwise is identical to T.  */

static tree
rebuild_function_or_method_type (tree t, tree return_type, tree arg_types,
				 tree raises, tsubst_flags_t complain)
{
  gcc_assert (FUNC_OR_METHOD_TYPE_P (t));

  tree new_type;
  if (TREE_CODE (t) == FUNCTION_TYPE)
    {
      new_type = build_function_type (return_type, arg_types);
      new_type = apply_memfn_quals (new_type, type_memfn_quals (t));
    }
  else
    {
      /* For a METHOD_TYPE the first entry of ARG_TYPES is the
	 implicit object parameter's class type.  */
      tree r = TREE_TYPE (TREE_VALUE (arg_types));
      /* Don't pick up extra function qualifiers from the basetype.  */
      r = cp_build_qualified_type_real (r, type_memfn_quals (t), complain);
      if (! MAYBE_CLASS_TYPE_P (r))
	{
	  /* [temp.deduct]

	     Type deduction may fail for any of the following reasons:

	     -- Attempting to create "pointer to member of T" when T
	     is not a class type.  */
	  if (complain & tf_error)
	    error ("creating pointer to member function of non-class type %qT",
		   r);
	  return error_mark_node;
	}

      new_type = build_method_type_directly (r, return_type,
					     TREE_CHAIN (arg_types));
    }
  /* Carry over the attributes, ref-qualifier, exception specification
     and late-return-type flag from T.  */
  new_type = cp_build_type_attribute_variant (new_type, TYPE_ATTRIBUTES (t));

  cp_ref_qualifier rqual = type_memfn_rqual (t);
  bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (t);
  return build_cp_fntype_variant (new_type, rqual, raises,
				  late_return_type_p);
}

/* Check if the function type of DECL, a FUNCTION_DECL, agrees with the
   type of each of its formal parameters.  If there is a disagreement
   then rebuild DECL's function type according to its formal parameter
   types, as part of a resolution for Core issues 1001/1322.
*/
static void
maybe_rebuild_function_decl_type (tree decl)
{
  bool function_type_needs_rebuilding = false;
  /* First pass: scan the user-visible parameters, comparing each
     PARM_DECL's type against the corresponding entry in the function
     type's argument list; stop at the first mismatch.  */
  if (tree parm_list = FUNCTION_FIRST_USER_PARM (decl))
    {
      tree parm_type_list = FUNCTION_FIRST_USER_PARMTYPE (decl);
      while (parm_type_list && parm_type_list != void_list_node)
	{
	  tree parm_type = TREE_VALUE (parm_type_list);
	  tree formal_parm_type_unqual = strip_top_quals (TREE_TYPE (parm_list));
	  if (!same_type_p (parm_type, formal_parm_type_unqual))
	    {
	      function_type_needs_rebuilding = true;
	      break;
	    }

	  parm_list = DECL_CHAIN (parm_list);
	  parm_type_list = TREE_CHAIN (parm_type_list);
	}
    }

  if (!function_type_needs_rebuilding)
    return;

  /* Second pass: rebuild the type argument list from the PARM_DECLs.  */
  const tree fntype = TREE_TYPE (decl);
  tree parm_list = DECL_ARGUMENTS (decl);
  tree old_parm_type_list = TYPE_ARG_TYPES (fntype);
  tree new_parm_type_list = NULL_TREE;
  tree *q = &new_parm_type_list;
  /* Copy the artificial parameters (e.g. 'this') through unchanged.  */
  for (int skip = num_artificial_parms_for (decl); skip > 0; skip--)
    {
      *q = copy_node (old_parm_type_list);
      parm_list = DECL_CHAIN (parm_list);
      old_parm_type_list = TREE_CHAIN (old_parm_type_list);
      q = &TREE_CHAIN (*q);
    }
  /* Replace each mismatched entry with the PARM_DECL's (top-level
     unqualified) type.  */
  while (old_parm_type_list && old_parm_type_list != void_list_node)
    {
      *q = copy_node (old_parm_type_list);
      tree *new_parm_type = &TREE_VALUE (*q);
      tree formal_parm_type_unqual = strip_top_quals (TREE_TYPE (parm_list));
      if (!same_type_p (*new_parm_type, formal_parm_type_unqual))
	*new_parm_type = formal_parm_type_unqual;

      parm_list = DECL_CHAIN (parm_list);
      old_parm_type_list = TREE_CHAIN (old_parm_type_list);
      q = &TREE_CHAIN (*q);
    }
  /* Preserve the (non-variadic) terminator.  */
  if (old_parm_type_list == void_list_node)
    *q = void_list_node;

  TREE_TYPE (decl)
    = rebuild_function_or_method_type (fntype,
				       TREE_TYPE (fntype), new_parm_type_list,
				       TYPE_RAISES_EXCEPTIONS (fntype),
				       tf_none);
}

/* Subroutine of tsubst_decl for the case when T is a FUNCTION_DECL.
*/
/* T is the FUNCTION_DECL being substituted, ARGS the template argument
   vector, COMPLAIN the diagnostic control.  LAMBDA_FNTYPE, when
   non-null, is an already-substituted METHOD_TYPE used when
   regenerating a lambda's op() -- presumably supplied by
   tsubst_lambda_expr (see the lambda references below); confirm against
   callers.  Returns the new FUNCTION_DECL or error_mark_node.  */

static tree
tsubst_function_decl (tree t, tree args, tsubst_flags_t complain,
                      tree lambda_fntype)
{
  tree gen_tmpl = NULL_TREE, argvec = NULL_TREE;
  hashval_t hash = 0;
  tree in_decl = t;

  /* Nobody should be tsubst'ing into non-template functions.  */
  gcc_assert (DECL_TEMPLATE_INFO (t) != NULL_TREE
              || DECL_LOCAL_DECL_P (t));

  if (DECL_LOCAL_DECL_P (t))
    {
      /* Local (block-scope) declarations use the local specialization
         table rather than the global one.  */
      if (tree spec = retrieve_local_specialization (t))
        return spec;
    }
  else if (TREE_CODE (DECL_TI_TEMPLATE (t)) == TEMPLATE_DECL)
    {
      /* If T is not dependent, just return it.  */
      if (!uses_template_parms (DECL_TI_ARGS (t))
          && !LAMBDA_FUNCTION_P (t))
        return t;

      /* Calculate the most general template of which R is a
         specialization.  */
      gen_tmpl = most_general_template (DECL_TI_TEMPLATE (t));

      /* We're substituting a lambda function under tsubst_lambda_expr but not
         directly from it; find the matching function we're already inside.
         But don't do this if T is a generic lambda with a single level of
         template parms, as in that case we're doing a normal
         instantiation.  */
      if (LAMBDA_FUNCTION_P (t) && !lambda_fntype
          && (!generic_lambda_fn_p (t)
              || TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)) > 1))
        return enclosing_instantiation_of (t);

      /* Calculate the complete set of arguments used to
         specialize R.  */
      argvec = tsubst_template_args (DECL_TI_ARGS
                                     (DECL_TEMPLATE_RESULT
                                      (DECL_TI_TEMPLATE (t))),
                                     args, complain, in_decl);
      if (argvec == error_mark_node)
        return error_mark_node;

      /* Check to see if we already have this specialization.  */
      if (!lambda_fntype)
        {
          hash = hash_tmpl_and_args (gen_tmpl, argvec);
          if (tree spec = retrieve_specialization (gen_tmpl, argvec, hash))
            return spec;
        }

      /* We can see more levels of arguments than parameters if there was a
         specialization of a member template, like this:

           template <class T> struct S { template <class U> void f(); }
           template <> template <class U> void S<int>::f(U);

         Here, we'll be substituting into the specialization, because that's
         where we can find the code we actually want to generate, but we'll
         have enough arguments for the most general template.

         We also deal with the peculiar case:

           template <class T> struct S
             { template <class U> friend void f(); };
           template <class U> void f() {}
           template S<int>;
           template void f<double>();

         Here, the ARGS for the instantiation of will be {int, double}.  But,
         we only need as many ARGS as there are levels of template parameters
         in CODE_PATTERN.  We are careful not to get fooled into reducing the
         ARGS in situations like:

           template <class T> struct S { template <class U> void f(U); }
           template <class T> template <> void S<T>::f(int) {}

         which we can spot because the pattern will be a specialization in
         this case.  */
      int args_depth = TMPL_ARGS_DEPTH (args);
      int parms_depth =
        TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (DECL_TI_TEMPLATE (t)));

      if (args_depth > parms_depth && !DECL_TEMPLATE_SPECIALIZATION (t))
        args = get_innermost_template_args (args, parms_depth);
    }
  else
    {
      /* This special case arises when we have something like this:

           template <class T> struct S {
             friend void f<int>(int, double);
           };

         Here, the DECL_TI_TEMPLATE for the friend declaration
         will be an IDENTIFIER_NODE.  We are being called from
         tsubst_friend_function, and we want only to create a
         new decl (R) with appropriate types so that we can call
         determine_specialization.  */
      gen_tmpl = NULL_TREE;
      argvec = NULL_TREE;
    }

  /* For a regenerated lambda, the context is the closure type carried
     by LAMBDA_FNTYPE; otherwise T's own context.  */
  tree closure = (lambda_fntype ? TYPE_METHOD_BASETYPE (lambda_fntype)
                  : NULL_TREE);
  tree ctx = closure ? closure : DECL_CONTEXT (t);
  bool member = ctx && TYPE_P (ctx);

  if (member && !closure)
    ctx = tsubst_aggr_type (ctx, args,
                            complain, t, /*entering_scope=*/1);

  tree type = (lambda_fntype ? lambda_fntype
               : tsubst (TREE_TYPE (t), args,
                         complain | tf_fndecl_type, in_decl));
  if (type == error_mark_node)
    return error_mark_node;

  /* If we hit excessive deduction depth, the type is bogus even if
     it isn't error_mark_node, so don't build a decl.  */
  if (excessive_deduction_depth)
    return error_mark_node;

  /* We do NOT check for matching decls pushed separately at this
     point, as they may not represent instantiations of this
     template, and in any case are considered separate under the
     discrete model.  */
  tree r = copy_decl (t);
  DECL_USE_TEMPLATE (r) = 0;
  TREE_TYPE (r) = type;
  /* Clear out the mangled name and RTL for the instantiation.  */
  SET_DECL_ASSEMBLER_NAME (r, NULL_TREE);
  SET_DECL_RTL (r, NULL);
  /* Leave DECL_INITIAL set on deleted instantiations.  */
  if (!DECL_DELETED_FN (r))
    DECL_INITIAL (r) = NULL_TREE;
  DECL_CONTEXT (r) = ctx;

  /* Handle explicit(dependent-expr).  */
  if (DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (t))
    {
      tree spec = lookup_explicit_specifier (t);
      spec = tsubst_copy_and_build (spec, args, complain, in_decl,
                                    /*function_p=*/false,
                                    /*i_c_e_p=*/true);
      spec = build_explicit_specifier (spec, complain);
      DECL_NONCONVERTING_P (r) = (spec == boolean_true_node);
    }

  /* OpenMP UDRs have the only argument a reference to the declared
     type.  We want to diagnose if the declared type is a reference,
     which is invalid, but as references to references are usually
     quietly merged, diagnose it here.  */
  if (DECL_OMP_DECLARE_REDUCTION_P (t))
    {
      tree argtype
        = TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (t))));
      argtype = tsubst (argtype, args, complain, in_decl);
      if (TYPE_REF_P (argtype))
        error_at (DECL_SOURCE_LOCATION (t),
                  "reference type %qT in "
                  "%<#pragma omp declare reduction%>", argtype);
      if (strchr (IDENTIFIER_POINTER (DECL_NAME (t)), '~') == NULL)
        DECL_NAME (r) = omp_reduction_id (ERROR_MARK, DECL_NAME (t),
                                          argtype);
    }

  if (member && DECL_CONV_FN_P (r))
    /* Type-conversion operator.  Reconstruct the name, in
       case it's the name of one of the template's parameters.  */
    DECL_NAME (r) = make_conv_op_name (TREE_TYPE (type));

  /* Substitute the parameter chain; for a lambda, the old 'this'
     parameter is dropped and a fresh one for the new closure is built
     below.  */
  tree parms = DECL_ARGUMENTS (t);
  if (closure)
    parms = DECL_CHAIN (parms);
  parms = tsubst (parms, args, complain, t);
  for (tree parm = parms; parm; parm = DECL_CHAIN (parm))
    DECL_CONTEXT (parm) = r;
  if (closure)
    {
      tree tparm = build_this_parm (r, closure, type_memfn_quals (type));
      DECL_NAME (tparm) = closure_identifier;
      DECL_CHAIN (tparm) = parms;
      parms = tparm;
    }
  DECL_ARGUMENTS (r) = parms;
  DECL_RESULT (r) = NULL_TREE;

  maybe_rebuild_function_decl_type (r);

  TREE_STATIC (r) = 0;
  TREE_PUBLIC (r) = TREE_PUBLIC (t);
  DECL_EXTERNAL (r) = 1;
  /* If this is an instantiation of a function with internal
     linkage, we already know what object file linkage will be
     assigned to the instantiation.  */
  DECL_INTERFACE_KNOWN (r) = !TREE_PUBLIC (r);
  DECL_DEFER_OUTPUT (r) = 0;
  DECL_CHAIN (r) = NULL_TREE;
  DECL_PENDING_INLINE_INFO (r) = 0;
  DECL_PENDING_INLINE_P (r) = 0;
  DECL_SAVED_TREE (r) = NULL_TREE;
  DECL_STRUCT_FUNCTION (r) = NULL;
  TREE_USED (r) = 0;
  /* We'll re-clone as appropriate in instantiate_template.  */
  DECL_CLONED_FUNCTION (r) = NULL_TREE;

  /* If we aren't complaining now, return on error before we register the
     specialization so that we'll complain eventually.  */
  if ((complain & tf_error) == 0
      && IDENTIFIER_ANY_OP_P (DECL_NAME (r))
      && !grok_op_properties (r, /*complain=*/false))
    return error_mark_node;

  /* Associate the constraints directly with the instantiation.  We
     don't substitute through the constraints; that's only done when
     they are checked.  */
  if (tree ci = get_constraints (t))
    /* Unless we're regenerating a lambda, in which case we'll set the
       lambda's constraints in tsubst_lambda_expr.  */
    if (!lambda_fntype)
      set_constraints (r, ci);

  if (DECL_FRIEND_CONTEXT (t))
    SET_DECL_FRIEND_CONTEXT (r,
                             tsubst (DECL_FRIEND_CONTEXT (t),
                                     args, complain, in_decl));

  /* Set up the DECL_TEMPLATE_INFO for R.  There's no need to do
     this in the special friend case mentioned above where
     GEN_TMPL is NULL.  */
  if (gen_tmpl && !closure)
    {
      DECL_TEMPLATE_INFO (r)
        = build_template_info (gen_tmpl, argvec);
      SET_DECL_IMPLICIT_INSTANTIATION (r);

      tree new_r
        = register_specialization (r, gen_tmpl, argvec, false, hash);
      if (new_r != r)
        /* We instantiated this while substituting into
           the type earlier (template/friend54.C).  */
        return new_r;

      /* We're not supposed to instantiate default arguments
         until they are called, for a template.  But, for a
         declaration like:

           template <class T> void f ()
           { extern void g(int i = T()); }

         we should do the substitution when the template is
         instantiated.  We handle the member function case in
         instantiate_class_template since the default arguments
         might refer to other members of the class.  */
      if (!member
          && !PRIMARY_TEMPLATE_P (gen_tmpl)
          && !uses_template_parms (argvec))
        tsubst_default_arguments (r, complain);
    }
  else if (DECL_LOCAL_DECL_P (r))
    {
      if (!cp_unevaluated_operand)
        register_local_specialization (r, t);
    }
  else
    DECL_TEMPLATE_INFO (r) = NULL_TREE;

  /* Copy the list of befriending classes.  */
  for (tree *friends = &DECL_BEFRIENDING_CLASSES (r);
       *friends;
       friends = &TREE_CHAIN (*friends))
    {
      *friends = copy_node (*friends);
      TREE_VALUE (*friends)
        = tsubst (TREE_VALUE (*friends), args, complain, in_decl);
    }

  if (DECL_CONSTRUCTOR_P (r) || DECL_DESTRUCTOR_P (r))
    {
      maybe_retrofit_in_chrg (r);
      if (DECL_CONSTRUCTOR_P (r) && !grok_ctor_properties (ctx, r))
        return error_mark_node;
      /* If this is an instantiation of a member template, clone it.
         If it isn't, that'll be handled by
         clone_constructors_and_destructors.  */
      if (PRIMARY_TEMPLATE_P (gen_tmpl))
        clone_cdtor (r, /*update_methods=*/false);
    }
  else if ((complain & tf_error) != 0
           && IDENTIFIER_ANY_OP_P (DECL_NAME (r))
           && !grok_op_properties (r, /*complain=*/true))
    return error_mark_node;

  /* Possibly limit visibility based on template args.  */
  DECL_VISIBILITY (r) = VISIBILITY_DEFAULT;
  if (DECL_VISIBILITY_SPECIFIED (t))
    {
      DECL_VISIBILITY_SPECIFIED (r) = 0;
      DECL_ATTRIBUTES (r)
        = remove_attribute ("visibility", DECL_ATTRIBUTES (r));
    }
  determine_visibility (r);
  if (DECL_DEFAULTED_OUTSIDE_CLASS_P (r)
      && !processing_template_decl)
    defaulted_late_check (r);

  apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0,
                                  args, complain, in_decl);

  if (flag_openmp)
    if (tree attr = lookup_attribute ("omp declare variant base",
                                      DECL_ATTRIBUTES (r)))
      omp_declare_variant_finalize (r, attr);

  return r;
}

/* Subroutine of tsubst_decl for the case when T is a TEMPLATE_DECL.
   Returns the partially-instantiated TEMPLATE_DECL (or, for a template
   template parameter, the reduced-level template), or error_mark_node.  */

static tree
tsubst_template_decl (tree t, tree args, tsubst_flags_t complain,
                      tree lambda_fntype)
{
  /* We can get here when processing a member function template,
     member class template, or template template parameter.  */
  tree decl = DECL_TEMPLATE_RESULT (t);
  tree in_decl = t;
  tree spec;
  tree tmpl_args;
  tree full_args;
  tree r;
  hashval_t hash = 0;

  if (DECL_TEMPLATE_TEMPLATE_PARM_P (t))
    {
      /* Template template parameter is treated here.  */
      tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
      if (new_type == error_mark_node)
        r = error_mark_node;
      /* If we get a real template back, return it.  This can happen in
         the context of most_specialized_partial_spec.  */
      else if (TREE_CODE (new_type) == TEMPLATE_DECL)
        r = new_type;
      else
        /* The new TEMPLATE_DECL was built in
           reduce_template_parm_level.  */
        r = TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (new_type);
      return r;
    }

  if (!lambda_fntype)
    {
      /* We might already have an instance of this template.
         The ARGS are for the surrounding class type, so the
         full args contain the tsubst'd args for the context,
         plus the innermost args from the template decl.  */
      tmpl_args = DECL_CLASS_TEMPLATE_P (t)
        ? CLASSTYPE_TI_ARGS (TREE_TYPE (t))
        : DECL_TI_ARGS (DECL_TEMPLATE_RESULT (t));
      /* Because this is a template, the arguments will still be
         dependent, even after substitution.  If
         PROCESSING_TEMPLATE_DECL is not set, the dependency
         predicates will short-circuit.  */
      ++processing_template_decl;
      full_args = tsubst_template_args (tmpl_args, args,
                                        complain, in_decl);
      --processing_template_decl;
      if (full_args == error_mark_node)
        return error_mark_node;

      /* If this is a default template template argument,
         tsubst might not have changed anything.  */
      if (full_args == tmpl_args)
        return t;

      hash = hash_tmpl_and_args (t, full_args);
      spec = retrieve_specialization (t, full_args, hash);
      if (spec != NULL_TREE)
        {
          if (TYPE_P (spec))
            /* Type partial instantiations are stored as the type by
               lookup_template_class_1, not here as the template.  */
            spec = CLASSTYPE_TI_TEMPLATE (spec);
          return spec;
        }
    }

  /* Make a new template decl.  It will be similar to the
     original, but will record the current template arguments.
     We also create a new function declaration, which is just
     like the old one, but points to this new template, rather
     than the old one.  */
  r = copy_decl (t);
  gcc_assert (DECL_LANG_SPECIFIC (r) != 0);
  DECL_CHAIN (r) = NULL_TREE;

  // Build new template info linking to the original template decl.
  if (!lambda_fntype)
    {
      DECL_TEMPLATE_INFO (r) = build_template_info (t, args);
      SET_DECL_IMPLICIT_INSTANTIATION (r);
    }
  else
    DECL_TEMPLATE_INFO (r) = NULL_TREE;

  /* The template parameters for this new template are all the
     template parameters for the old template, except the
     outermost level of parameters.  */
  DECL_TEMPLATE_PARMS (r)
    = tsubst_template_parms (DECL_TEMPLATE_PARMS (t), args,
                             complain);

  /* Substitute the DECL_TEMPLATE_RESULT: a FUNCTION_DECL, a class
     type, or some other decl.  CLASS_P tracks the class case so the
     TI back-links are patched appropriately afterwards.  */
  bool class_p = false;
  tree inner = decl;
  ++processing_template_decl;
  if (TREE_CODE (inner) == FUNCTION_DECL)
    inner = tsubst_function_decl (inner, args, complain, lambda_fntype);
  else
    {
      if (TREE_CODE (inner) == TYPE_DECL && !TYPE_DECL_ALIAS_P (inner))
        {
          class_p = true;
          inner = TREE_TYPE (inner);
        }
      if (class_p)
        inner = tsubst_aggr_type (inner, args, complain,
                                  in_decl, /*entering*/1);
      else
        inner = tsubst (inner, args, complain, in_decl);
    }
  --processing_template_decl;
  if (inner == error_mark_node)
    return error_mark_node;

  if (class_p)
    {
      /* For a partial specialization, we need to keep pointing to
         the primary template.  */
      if (!DECL_TEMPLATE_SPECIALIZATION (t))
        CLASSTYPE_TI_TEMPLATE (inner) = r;

      DECL_TI_ARGS (r) = CLASSTYPE_TI_ARGS (inner);
      inner = TYPE_MAIN_DECL (inner);
    }
  else if (lambda_fntype)
    {
      tree args = template_parms_to_args (DECL_TEMPLATE_PARMS (r));
      DECL_TEMPLATE_INFO (inner) = build_template_info (r, args);
    }
  else
    {
      if (TREE_CODE (decl) != TYPE_DECL || !TYPE_DECL_ALIAS_P (decl))
        DECL_TI_TEMPLATE (inner) = r;
      DECL_TI_ARGS (r) = DECL_TI_ARGS (inner);
    }

  DECL_TEMPLATE_RESULT (r) = inner;
  TREE_TYPE (r) = TREE_TYPE (inner);
  DECL_CONTEXT (r) = DECL_CONTEXT (inner);

  DECL_TEMPLATE_INSTANTIATIONS (r) = NULL_TREE;
  DECL_TEMPLATE_SPECIALIZATIONS (r) = NULL_TREE;

  if (PRIMARY_TEMPLATE_P (t))
    DECL_PRIMARY_TEMPLATE (r) = r;

  if (TREE_CODE (decl) != TYPE_DECL && !VAR_P (decl)
      && !lambda_fntype)
    /* Record this non-type partial instantiation.  */
    register_specialization (r, t,
                             DECL_TI_ARGS (DECL_TEMPLATE_RESULT (r)),
                             false, hash);

  return r;
}

/* True if FN is the op() for a lambda in an uninstantiated template.  */

bool
lambda_fn_in_template_p (tree fn)
{
  if (!fn || !LAMBDA_FUNCTION_P (fn))
    return false;
  tree closure = DECL_CONTEXT (fn);
  return CLASSTYPE_TEMPLATE_INFO (closure) != NULL_TREE;
}

/* True if FN is the substitution (via tsubst_lambda_expr) of a function for
   which the above is true.  */

bool
instantiated_lambda_fn_p (tree fn)
{
  if (!fn || !LAMBDA_FUNCTION_P (fn))
    return false;
  tree closure = DECL_CONTEXT (fn);
  tree lam = CLASSTYPE_LAMBDA_EXPR (closure);
  return LAMBDA_EXPR_INSTANTIATED (lam);
}

/* We're instantiating a variable from template function TCTX.  Return the
   corresponding current enclosing scope.  This gets complicated because lambda
   functions in templates are regenerated rather than instantiated, but generic
   lambda functions are subsequently instantiated.  */

static tree
enclosing_instantiation_of (tree otctx)
{
  tree tctx = otctx;
  tree fn = current_function_decl;
  int lambda_count = 0;

  /* Count how many lambda op()s wrap the template-side context, then
     walk the current function-decl chain looking for the function whose
     most-general template matches, skipping the same number of
     instantiated lambdas.  */
  for (; tctx && (lambda_fn_in_template_p (tctx)
                  || instantiated_lambda_fn_p (tctx));
       tctx = decl_function_context (tctx))
    ++lambda_count;
  for (; fn; fn = decl_function_context (fn))
    {
      tree ofn = fn;
      int flambda_count = 0;
      for (; fn && instantiated_lambda_fn_p (fn);
           fn = decl_function_context (fn))
        ++flambda_count;
      if ((fn && DECL_TEMPLATE_INFO (fn))
          ? most_general_template (fn) != most_general_template (tctx)
          : fn != tctx)
        continue;
      if (flambda_count != lambda_count)
        {
          gcc_assert (flambda_count > lambda_count);
          for (; flambda_count > lambda_count; --flambda_count)
            ofn = decl_function_context (ofn);
        }
      gcc_assert (DECL_NAME (ofn) == DECL_NAME (otctx)
                  || DECL_CONV_FN_P (ofn));
      return ofn;
    }
  /* A matching enclosing scope must exist by construction.  */
  gcc_unreachable ();
}

/* Substitute the ARGS into the T, which is a _DECL.  Return the
   result of the substitution.  Issue error and warning messages under
   control of COMPLAIN.
*/

static tree
tsubst_decl (tree t, tree args, tsubst_flags_t complain)
{
  /* RETURN jumps to OUT so that input_location is always restored on
     exit; plain 'return' is only used via the subroutine calls above
     the switch.  */
#define RETURN(EXP) do { r = (EXP); goto out; } while(0)
  location_t saved_loc;
  tree r = NULL_TREE;
  tree in_decl = t;
  hashval_t hash = 0;

  /* Set the filename and linenumber to improve error-reporting.  */
  saved_loc = input_location;
  input_location = DECL_SOURCE_LOCATION (t);

  switch (TREE_CODE (t))
    {
    case TEMPLATE_DECL:
      r = tsubst_template_decl (t, args, complain, /*lambda*/NULL_TREE);
      break;

    case FUNCTION_DECL:
      r = tsubst_function_decl (t, args, complain, /*lambda*/NULL_TREE);
      break;

    case PARM_DECL:
      {
        tree type = NULL_TREE;
        int i, len = 1;
        tree expanded_types = NULL_TREE;
        tree prev_r = NULL_TREE;
        tree first_r = NULL_TREE;

        if (DECL_PACK_P (t))
          {
            /* If there is a local specialization that isn't a
               parameter pack, it means that we're doing a "simple"
               substitution from inside tsubst_pack_expansion. Just
               return the local specialization (which will be a single
               parm).  */
            tree spec = retrieve_local_specialization (t);
            if (spec
                && TREE_CODE (spec) == PARM_DECL
                && TREE_CODE (TREE_TYPE (spec)) != TYPE_PACK_EXPANSION)
              RETURN (spec);

            /* Expand the TYPE_PACK_EXPANSION that provides the types for
               the parameters in this function parameter pack.  */
            expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args,
                                                    complain, in_decl);
            if (TREE_CODE (expanded_types) == TREE_VEC)
              {
                len = TREE_VEC_LENGTH (expanded_types);

                /* Zero-length parameter packs are boring. Just substitute
                   into the chain.  */
                if (len == 0 && !cp_unevaluated_operand)
                  RETURN (tsubst (TREE_CHAIN (t), args, complain,
                                  TREE_CHAIN (t)));
              }
            else
              {
                /* All we did was update the type. Make a note of that.  */
                type = expanded_types;
                expanded_types = NULL_TREE;
              }
          }

        /* Loop through all of the parameters we'll build. When T is
           a function parameter pack, LEN is the number of expanded
           types in EXPANDED_TYPES; otherwise, LEN is 1.  */
        r = NULL_TREE;
        for (i = 0; i < len; ++i)
          {
            prev_r = r;
            r = copy_node (t);
            if (DECL_TEMPLATE_PARM_P (t))
              SET_DECL_TEMPLATE_PARM_P (r);

            if (expanded_types)
              /* We're on the Ith parameter of the function parameter
                 pack.  */
              {
                /* Get the Ith type.  */
                type = TREE_VEC_ELT (expanded_types, i);

                /* Rename the parameter to include the index.  */
                DECL_NAME (r)
                  = make_ith_pack_parameter_name (DECL_NAME (r), i);
              }
            else if (!type)
              /* We're dealing with a normal parameter.  */
              type = tsubst (TREE_TYPE (t), args, complain, in_decl);

            type = type_decays_to (type);
            TREE_TYPE (r) = type;
            cp_apply_type_quals_to_decl (cp_type_quals (type), r);

            if (DECL_INITIAL (r))
              {
                if (TREE_CODE (DECL_INITIAL (r)) != TEMPLATE_PARM_INDEX)
                  DECL_INITIAL (r) = TREE_TYPE (r);
                else
                  DECL_INITIAL (r) = tsubst (DECL_INITIAL (r), args,
                                             complain, in_decl);
              }

            DECL_CONTEXT (r) = NULL_TREE;

            if (!DECL_TEMPLATE_PARM_P (r))
              DECL_ARG_TYPE (r) = type_passed_as (type);

            apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0,
                                            args, complain, in_decl);

            /* Keep track of the first new parameter we
               generate. That's what will be returned to the
               caller.  */
            if (!first_r)
              first_r = r;

            /* Build a proper chain of parameters when substituting
               into a function parameter pack.  */
            if (prev_r)
              DECL_CHAIN (prev_r) = r;
          }

        /* If cp_unevaluated_operand is set, we're just looking for a
           single dummy parameter, so don't keep going.  */
        if (DECL_CHAIN (t) && !cp_unevaluated_operand)
          DECL_CHAIN (r) = tsubst (DECL_CHAIN (t), args,
                                   complain, DECL_CHAIN (t));

        /* FIRST_R contains the start of the chain we've built.  */
        r = first_r;
      }
      break;

    case FIELD_DECL:
      {
        tree type = NULL_TREE;
        tree vec = NULL_TREE;
        tree expanded_types = NULL_TREE;
        int len = 1;

        if (PACK_EXPANSION_P (TREE_TYPE (t)))
          {
            /* This field is a lambda capture pack.  Return a TREE_VEC of
               the expanded fields to instantiate_class_template_1.  */
            expanded_types = tsubst_pack_expansion (TREE_TYPE (t), args,
                                                    complain, in_decl);
            if (TREE_CODE (expanded_types) == TREE_VEC)
              {
                len = TREE_VEC_LENGTH (expanded_types);
                vec = make_tree_vec (len);
              }
            else
              {
                /* All we did was update the type. Make a note of that.  */
                type = expanded_types;
                expanded_types = NULL_TREE;
              }
          }

        for (int i = 0; i < len; ++i)
          {
            r = copy_decl (t);
            if (expanded_types)
              {
                type = TREE_VEC_ELT (expanded_types, i);
                DECL_NAME (r)
                  = make_ith_pack_parameter_name (DECL_NAME (r), i);
              }
            else if (!type)
              type = tsubst (TREE_TYPE (t), args, complain, in_decl);

            if (type == error_mark_node)
              RETURN (error_mark_node);
            TREE_TYPE (r) = type;
            cp_apply_type_quals_to_decl (cp_type_quals (type), r);

            if (DECL_C_BIT_FIELD (r))
              /* For bit-fields, DECL_BIT_FIELD_REPRESENTATIVE gives the
                 number of bits.  */
              DECL_BIT_FIELD_REPRESENTATIVE (r)
                = tsubst_expr (DECL_BIT_FIELD_REPRESENTATIVE (t), args,
                               complain, in_decl,
                               /*integral_constant_expression_p=*/true);

            if (DECL_INITIAL (t))
              {
                /* Set up DECL_TEMPLATE_INFO so that we can get at the
                   NSDMI in perform_member_init.  Still set DECL_INITIAL
                   so that we know there is one.  */
                DECL_INITIAL (r) = void_node;
                gcc_assert (DECL_LANG_SPECIFIC (r) == NULL);
                retrofit_lang_decl (r);
                DECL_TEMPLATE_INFO (r) = build_template_info (t, args);
              }

            /* We don't have to set DECL_CONTEXT here; it is set by
               finish_member_declaration.  */
            DECL_CHAIN (r) = NULL_TREE;

            apply_late_template_attributes (&r, DECL_ATTRIBUTES (r), 0,
                                            args, complain, in_decl);

            if (vec)
              TREE_VEC_ELT (vec, i) = r;
          }

        if (vec)
          r = vec;
      }
      break;

    case USING_DECL:
      /* We reach here only for member using decls.  We also need to check
         uses_template_parms because DECL_DEPENDENT_P is not set for a
         using-declaration that designates a member of the current
         instantiation (c++/53549).  */
      if (DECL_DEPENDENT_P (t)
          || uses_template_parms (USING_DECL_SCOPE (t)))
        {
          tree scope = USING_DECL_SCOPE (t);
          tree name = tsubst_copy (DECL_NAME (t), args, complain, in_decl);
          if (PACK_EXPANSION_P (scope))
            {
              /* A pack of base classes: build one using-decl per
                 expanded scope.  */
              tree vec = tsubst_pack_expansion (scope, args, complain,
                                                in_decl);
              int len = TREE_VEC_LENGTH (vec);
              r = make_tree_vec (len);
              for (int i = 0; i < len; ++i)
                {
                  tree escope = TREE_VEC_ELT (vec, i);
                  tree elt = do_class_using_decl (escope, name);
                  if (!elt)
                    {
                      r = error_mark_node;
                      break;
                    }
                  else
                    {
                      TREE_PROTECTED (elt) = TREE_PROTECTED (t);
                      TREE_PRIVATE (elt) = TREE_PRIVATE (t);
                    }
                  TREE_VEC_ELT (r, i) = elt;
                }
            }
          else
            {
              tree inst_scope = tsubst_copy (USING_DECL_SCOPE (t), args,
                                             complain, in_decl);
              r = do_class_using_decl (inst_scope, name);
              if (!r)
                r = error_mark_node;
              else
                {
                  TREE_PROTECTED (r) = TREE_PROTECTED (t);
                  TREE_PRIVATE (r) = TREE_PRIVATE (t);
                }
            }
        }
      else
        {
          r = copy_node (t);
          DECL_CHAIN (r) = NULL_TREE;
        }
      break;

    case TYPE_DECL:
    case VAR_DECL:
      {
        tree argvec = NULL_TREE;
        tree gen_tmpl = NULL_TREE;
        tree tmpl = NULL_TREE;
        tree type = NULL_TREE;

        if (TREE_TYPE (t) == error_mark_node)
          RETURN (error_mark_node);

        if (TREE_CODE (t) == TYPE_DECL
            && t == TYPE_MAIN_DECL (TREE_TYPE (t)))
          {
            /* If this is the canonical decl, we don't have to
               mess with instantiations, and often we can't (for
               typename, template type parms and such).  Note that
               TYPE_NAME is not correct for the above test if
               we've copied the type for a typedef.  */
            type = tsubst (TREE_TYPE (t), args, complain, in_decl);
            if (type == error_mark_node)
              RETURN (error_mark_node);
            r = TYPE_NAME (type);
            break;
          }

        /* Check to see if we already have the specialization we
           need.  */
        tree spec = NULL_TREE;
        bool local_p = false;
        tree ctx = DECL_CONTEXT (t);
        if (!(VAR_P (t) && DECL_LOCAL_DECL_P (t))
            && (DECL_CLASS_SCOPE_P (t) || DECL_NAMESPACE_SCOPE_P (t)))
          {
            local_p = false;
            if (DECL_CLASS_SCOPE_P (t))
              {
                ctx = tsubst_aggr_type (ctx, args,
                                        complain,
                                        in_decl, /*entering_scope=*/1);
                /* If CTX is unchanged, then T is in fact the
                   specialization we want.  That situation occurs when
                   referencing a static data member within in its own
                   class.  We can use pointer equality, rather than
                   same_type_p, because DECL_CONTEXT is always
                   canonical...  */
                if (ctx == DECL_CONTEXT (t)
                    /* ... unless T is a member template; in which
                       case our caller can be willing to create a
                       specialization of that template represented
                       by T.  */
                    && !(DECL_TI_TEMPLATE (t)
                         && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (t))))
                  spec = t;
              }

            if (!spec)
              {
                tmpl = DECL_TI_TEMPLATE (t);
                gen_tmpl = most_general_template (tmpl);
                argvec = tsubst (DECL_TI_ARGS (t), args, complain, in_decl);
                if (argvec != error_mark_node)
                  argvec = (coerce_innermost_template_parms
                            (DECL_TEMPLATE_PARMS (gen_tmpl),
                             argvec, t, complain,
                             /*all*/true, /*defarg*/true));
                if (argvec == error_mark_node)
                  RETURN (error_mark_node);
                hash = hash_tmpl_and_args (gen_tmpl, argvec);
                spec = retrieve_specialization (gen_tmpl, argvec, hash);
              }
          }
        else
          {
            if (!(VAR_P (t) && DECL_LOCAL_DECL_P (t)))
              /* Subsequent calls to pushdecl will fill this in.  */
              ctx = NULL_TREE;
            /* A local variable.  */
            local_p = true;
            /* Unless this is a reference to a static variable from an
               enclosing function, in which case we need to fill it in now.  */
            if (TREE_STATIC (t))
              {
                tree fn = enclosing_instantiation_of (DECL_CONTEXT (t));
                if (fn != current_function_decl)
                  ctx = fn;
              }
            spec = retrieve_local_specialization (t);
          }
        /* If we already have the specialization we need, there is
           nothing more to do.  */
        if (spec)
          {
            r = spec;
            break;
          }

        /* Create a new node for the specialization we need.  */
        if (type == NULL_TREE)
          {
            if (is_typedef_decl (t))
              type = DECL_ORIGINAL_TYPE (t);
            else
              type = TREE_TYPE (t);
            if (VAR_P (t)
                && VAR_HAD_UNKNOWN_BOUND (t)
                && type != error_mark_node)
              type = strip_array_domain (type);
            tree sub_args = args;
            if (tree auto_node = type_uses_auto (type))
              {
                /* Mask off any template args past the variable's context so we
                   don't replace the auto with an unrelated argument.  */
                int nouter = TEMPLATE_TYPE_LEVEL (auto_node) - 1;
                int extra = TMPL_ARGS_DEPTH (args) - nouter;
                if (extra > 0)
                  /* This should never happen with the new lambda instantiation
                     model, but keep the handling just in case.  */
                  gcc_assert (!CHECKING_P),
                  sub_args = strip_innermost_template_args (args, extra);
              }
            type = tsubst (type, sub_args, complain, in_decl);
            /* Substituting the type might have recursively instantiated this
               same alias (c++/86171).  */
            if (gen_tmpl && DECL_ALIAS_TEMPLATE_P (gen_tmpl)
                && (spec = retrieve_specialization (gen_tmpl, argvec, hash)))
              {
                r = spec;
                break;
              }
          }
        r = copy_decl (t);
        if (VAR_P (r))
          {
            DECL_INITIALIZED_P (r) = 0;
            DECL_TEMPLATE_INSTANTIATED (r) = 0;
            if (type == error_mark_node)
              RETURN (error_mark_node);
            if (TREE_CODE (type) == FUNCTION_TYPE)
              {
                /* It may seem that this case cannot occur, since:

                   typedef void f();
                   void g() { f x; }

                   declares a function, not a variable.  However:

                   typedef void f();
                   template <typename T> void g() { T t; }
                   template void g<f>();

                   is an attempt to declare a variable with function
                   type.  */
                error ("variable %qD has function type",
                       /* R is not yet sufficiently initialized, so we
                          just use its name.  */
                       DECL_NAME (r));
                RETURN (error_mark_node);
              }
            type = complete_type (type);
            /* Wait until cp_finish_decl to set this again, to handle
               circular dependency (template/instantiate6.C).  */
            DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) = 0;
            type = check_var_type (DECL_NAME (r), type,
                                   DECL_SOURCE_LOCATION (r));
            if (DECL_HAS_VALUE_EXPR_P (t))
              {
                tree ve = DECL_VALUE_EXPR (t);
                /* If the DECL_VALUE_EXPR is converted to the declared type,
                   preserve the identity so that gimplify_type_sizes
                   works.  */
                bool nop = (TREE_CODE (ve) == NOP_EXPR);
                if (nop)
                  ve = TREE_OPERAND (ve, 0);
                ve = tsubst_expr (ve, args, complain, in_decl,
                                  /*constant_expression_p=*/false);
                if (REFERENCE_REF_P (ve))
                  {
                    gcc_assert (TYPE_REF_P (type));
                    ve = TREE_OPERAND (ve, 0);
                  }
                if (nop)
                  ve = build_nop (type, ve);
                else if (DECL_LANG_SPECIFIC (t)
                         && DECL_OMP_PRIVATIZED_MEMBER (t)
                         && TREE_CODE (ve) == COMPONENT_REF
                         && TREE_CODE (TREE_OPERAND (ve, 1)) == FIELD_DECL
                         && DECL_BIT_FIELD_TYPE (TREE_OPERAND (ve, 1)) == type)
                  type = TREE_TYPE (ve);
                else
                  gcc_checking_assert (TYPE_MAIN_VARIANT (TREE_TYPE (ve))
                                       == TYPE_MAIN_VARIANT (type));
                SET_DECL_VALUE_EXPR (r, ve);
              }
            if (CP_DECL_THREAD_LOCAL_P (r)
                && !processing_template_decl)
              set_decl_tls_model (r, decl_default_tls_model (r));
          }
        else if (DECL_SELF_REFERENCE_P (t))
          SET_DECL_SELF_REFERENCE_P (r);
        TREE_TYPE (r) = type;
        cp_apply_type_quals_to_decl (cp_type_quals (type), r);
        DECL_CONTEXT (r) = ctx;
        /* Clear out the mangled name and RTL for the instantiation.  */
        SET_DECL_ASSEMBLER_NAME (r, NULL_TREE);
        if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL))
          SET_DECL_RTL (r, NULL);
        /* The initializer must not be expanded until it is required;
           see [temp.inst].  */
        DECL_INITIAL (r) = NULL_TREE;
        DECL_SIZE (r) = DECL_SIZE_UNIT (r) = 0;
        if (VAR_P (r))
          {
            if (DECL_LANG_SPECIFIC (r))
              SET_DECL_DEPENDENT_INIT_P (r, false);

            SET_DECL_MODE (r, VOIDmode);

            /* Possibly limit visibility based on template args.  */
            DECL_VISIBILITY (r) = VISIBILITY_DEFAULT;
            if (DECL_VISIBILITY_SPECIFIED (t))
              {
                DECL_VISIBILITY_SPECIFIED (r) = 0;
                DECL_ATTRIBUTES (r)
                  = remove_attribute ("visibility", DECL_ATTRIBUTES (r));
              }
            determine_visibility (r);
          }

        if (!local_p)
          {
            /* A static data member declaration is always marked
               external when it is declared in-class, even if an
               initializer is present.  We mimic the non-template
               processing here.  */
            DECL_EXTERNAL (r) = 1;
            if (DECL_NAMESPACE_SCOPE_P (t))
              DECL_NOT_REALLY_EXTERN (r) = 1;

            DECL_TEMPLATE_INFO (r) = build_template_info (tmpl, argvec);
            SET_DECL_IMPLICIT_INSTANTIATION (r);
            if (!error_operand_p (r) || (complain & tf_error))
              register_specialization (r, gen_tmpl, argvec, false, hash);
          }
        else
          {
            if (DECL_LANG_SPECIFIC (r))
              DECL_TEMPLATE_INFO (r) = NULL_TREE;
            if (!cp_unevaluated_operand)
              register_local_specialization (r, t);
          }

        DECL_CHAIN (r) = NULL_TREE;

        apply_late_template_attributes (&r, DECL_ATTRIBUTES (r),
                                        /*flags=*/0,
                                        args, complain, in_decl);

        /* Preserve a typedef that names a type.  */
        if (is_typedef_decl (r) && type != error_mark_node)
          {
            DECL_ORIGINAL_TYPE (r) = NULL_TREE;
            set_underlying_type (r);
            if (TYPE_DECL_ALIAS_P (r))
              /* An alias template specialization can be dependent
                 even if its underlying type is not.  */
              TYPE_DEPENDENT_P_VALID (TREE_TYPE (r)) = false;
          }

        layout_decl (r, 0);
      }
      break;

    default:
      gcc_unreachable ();
    }
#undef RETURN

 out:
  /* Restore the file and line information.  */
  input_location = saved_loc;

  return r;
}

/* Substitute into the complete parameter type list PARMS.  */

tree
tsubst_function_parms (tree parms, tree args,
                       tsubst_flags_t complain, tree in_decl)
{
  return tsubst_arg_types (parms, args, NULL_TREE, complain, in_decl);
}

/* Substitute into the ARG_TYPES of a function type.
   If END is a TREE_CHAIN, leave it and any following types
   un-substituted.
*/

static tree
tsubst_arg_types (tree arg_types,
                  tree args,
                  tree end,
                  tsubst_flags_t complain,
                  tree in_decl)
{
  tree remaining_arg_types;
  tree type = NULL_TREE;
  int i = 1;
  tree expanded_args = NULL_TREE;
  tree default_arg;

  if (!arg_types || arg_types == void_list_node || arg_types == end)
    return arg_types;

  /* Recurse on the tail first, so the list is rebuilt back-to-front and
     each substituted type is consed onto the already-substituted
     remainder.  */
  remaining_arg_types = tsubst_arg_types (TREE_CHAIN (arg_types),
                                          args, end, complain, in_decl);
  if (remaining_arg_types == error_mark_node)
    return error_mark_node;

  if (PACK_EXPANSION_P (TREE_VALUE (arg_types)))
    {
      /* For a pack expansion, perform substitution on the
         entire expression. Later on, we'll handle the arguments
         one-by-one.  */
      expanded_args = tsubst_pack_expansion (TREE_VALUE (arg_types),
                                             args, complain, in_decl);

      if (TREE_CODE (expanded_args) == TREE_VEC)
        /* So that we'll spin through the parameters, one by one.  */
        i = TREE_VEC_LENGTH (expanded_args);
      else
        {
          /* We only partially substituted into the parameter
             pack. Our type is TYPE_PACK_EXPANSION.  */
          type = expanded_args;
          expanded_args = NULL_TREE;
        }
    }

  /* I counts down so the pack elements are consed in order.  */
  while (i > 0)
    {
      --i;

      if (expanded_args)
        type = TREE_VEC_ELT (expanded_args, i);
      else if (!type)
        type = tsubst (TREE_VALUE (arg_types), args, complain, in_decl);

      if (type == error_mark_node)
        return error_mark_node;
      if (VOID_TYPE_P (type))
        {
          if (complain & tf_error)
            {
              error ("invalid parameter type %qT", type);
              if (in_decl)
                error ("in declaration %q+D", in_decl);
            }
          return error_mark_node;
        }
      /* DR 657. */
      if (abstract_virtuals_error_sfinae (ACU_PARM, type, complain))
        return error_mark_node;

      /* Do array-to-pointer, function-to-pointer conversion, and ignore
         top-level qualifiers as required.  */
      type = cv_unqualified (type_decays_to (type));

      /* We do not substitute into default arguments here.  The standard
         mandates that they be instantiated only when needed, which is
         done in build_over_call.  */
      default_arg = TREE_PURPOSE (arg_types);

      /* Except that we do substitute default arguments under
         tsubst_lambda_expr, since the new op() won't have any associated
         template arguments for us to refer to later.  */
      if (lambda_fn_in_template_p (in_decl))
        default_arg = tsubst_copy_and_build (default_arg, args, complain,
                                             in_decl, false/*fn*/,
                                             false/*constexpr*/);

      if (default_arg && TREE_CODE (default_arg) == DEFERRED_PARSE)
        {
          /* We've instantiated a template before its default arguments
             have been parsed.  This can happen for a nested template
             class, and is not an error unless we require the default
             argument in a call of this function.  */
          remaining_arg_types
            = tree_cons (default_arg, type, remaining_arg_types);
          vec_safe_push (DEFPARSE_INSTANTIATIONS (default_arg),
                         remaining_arg_types);
        }
      else
        remaining_arg_types
          = hash_tree_cons (default_arg, type, remaining_arg_types);
    }

  return remaining_arg_types;
}

/* Substitute into a FUNCTION_TYPE or METHOD_TYPE.  This routine does
   *not* handle the exception-specification for FNTYPE, because the
   initial substitution of explicitly provided template parameters
   during argument deduction forbids substitution into the
   exception-specification:

     [temp.deduct]

     All references in the function type of the function template to the
     corresponding template parameters are replaced by the specified
     template argument values.  If a substitution in a template parameter
     or in the function type of the function template results in an
     invalid type, type deduction fails.  [Note: The equivalent
     substitution in exception specifications is done only when the
     function is instantiated, at which point a program is ill-formed if
     the substitution results in an invalid type.]  */

static tree
tsubst_function_type (tree t,
                      tree args,
                      tsubst_flags_t complain,
                      tree in_decl)
{
  tree return_type;
  tree arg_types = NULL_TREE;

  /* The TYPE_CONTEXT is not used for function/method types.  */
  gcc_assert (TYPE_CONTEXT (t) == NULL_TREE);

  /* DR 1227: Mixing immediate and non-immediate contexts in deduction
     failure.  For a late (trailing) return type the arguments are
     substituted first; otherwise the return type is done first.  */
  bool late_return_type_p = TYPE_HAS_LATE_RETURN_TYPE (t);

  if (late_return_type_p)
    {
      /* Substitute the argument types.  */
      arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE,
                                    complain, in_decl);
      if (arg_types == error_mark_node)
        return error_mark_node;

      tree save_ccp = current_class_ptr;
      tree save_ccr = current_class_ref;
      tree this_type = (TREE_CODE (t) == METHOD_TYPE
                        ? TREE_TYPE (TREE_VALUE (arg_types)) : NULL_TREE);
      bool do_inject = this_type && CLASS_TYPE_P (this_type);
      if (do_inject)
        {
          /* DR 1207: 'this' is in scope in the trailing return type.  */
          inject_this_parameter (this_type, cp_type_quals (this_type));
        }

      /* Substitute the return type.  */
      return_type = tsubst (TREE_TYPE (t), args, complain, in_decl);

      if (do_inject)
        {
          current_class_ptr = save_ccp;
          current_class_ref = save_ccr;
        }
    }
  else
    /* Substitute the return type.  */
    return_type = tsubst (TREE_TYPE (t), args, complain, in_decl);

  if (return_type == error_mark_node)
    return error_mark_node;
  /* DR 486 clarifies that creation of a function type with an
     invalid return type is a deduction failure.  */
  if (TREE_CODE (return_type) == ARRAY_TYPE
      || TREE_CODE (return_type) == FUNCTION_TYPE)
    {
      if (complain & tf_error)
        {
          if (TREE_CODE (return_type) == ARRAY_TYPE)
            error ("function returning an array");
          else
            error ("function returning a function");
        }
      return error_mark_node;
    }
  /* And DR 657. */
  if (abstract_virtuals_error_sfinae (ACU_RETURN, return_type, complain))
    return error_mark_node;

  if (!late_return_type_p)
    {
      /* Substitute the argument types.  */
      arg_types = tsubst_arg_types (TYPE_ARG_TYPES (t), args, NULL_TREE,
                                    complain, in_decl);
      if (arg_types == error_mark_node)
        return error_mark_node;
    }

  /* Construct a new type node and return it.  */
  return rebuild_function_or_method_type (t, return_type, arg_types,
                                          /*raises=*/NULL_TREE, complain);
}

/* FNTYPE is a FUNCTION_TYPE or METHOD_TYPE.
   Substitute the template ARGS into that specification, and return the
   substituted specification.  If there is no specification, return
   NULL_TREE.  */

static tree
tsubst_exception_specification (tree fntype,
				tree args,
				tsubst_flags_t complain,
				tree in_decl,
				bool defer_ok)
{
  tree specs;
  tree new_specs;

  specs = TYPE_RAISES_EXCEPTIONS (fntype);
  new_specs = NULL_TREE;
  if (specs && TREE_PURPOSE (specs))
    {
      /* A noexcept-specifier.  */
      tree expr = TREE_PURPOSE (specs);
      if (TREE_CODE (expr) == INTEGER_CST)
	/* Already a constant (noexcept(true/false)); nothing to
	   substitute.  */
	new_specs = expr;
      else if (defer_ok)
	{
	  /* Defer instantiation of noexcept-specifiers to avoid
	     excessive instantiations (c++/49107).  */
	  new_specs = make_node (DEFERRED_NOEXCEPT);
	  if (DEFERRED_NOEXCEPT_SPEC_P (specs))
	    {
	      /* We already partially instantiated this member template,
		 so combine the new args with the old.  */
	      DEFERRED_NOEXCEPT_PATTERN (new_specs)
		= DEFERRED_NOEXCEPT_PATTERN (expr);
	      DEFERRED_NOEXCEPT_ARGS (new_specs)
		= add_to_template_args (DEFERRED_NOEXCEPT_ARGS (expr), args);
	    }
	  else
	    {
	      DEFERRED_NOEXCEPT_PATTERN (new_specs) = expr;
	      DEFERRED_NOEXCEPT_ARGS (new_specs) = args;
	    }
	}
      else
	{
	  /* Instantiate the noexcept operand now, as a constant
	     expression.  */
	  if (DEFERRED_NOEXCEPT_SPEC_P (specs))
	    {
	      args = add_to_template_args (DEFERRED_NOEXCEPT_ARGS (expr),
					   args);
	      expr = DEFERRED_NOEXCEPT_PATTERN (expr);
	    }
	  new_specs = tsubst_copy_and_build
	    (expr, args, complain, in_decl, /*function_p=*/false,
	     /*integral_constant_expression_p=*/true);
	}
      new_specs = build_noexcept_spec (new_specs, complain);
    }
  else if (specs)
    {
      /* A dynamic exception-specification, i.e. throw(...)-style.  */
      if (! TREE_VALUE (specs))
	new_specs = specs;
      else
	while (specs)
	  {
	    tree spec;
	    int i, len = 1;
	    tree expanded_specs = NULL_TREE;

	    if (PACK_EXPANSION_P (TREE_VALUE (specs)))
	      {
		/* Expand the pack expansion type.  */
		expanded_specs = tsubst_pack_expansion (TREE_VALUE (specs),
							args, complain,
							in_decl);
		if (expanded_specs == error_mark_node)
		  return error_mark_node;
		else if (TREE_CODE (expanded_specs) == TREE_VEC)
		  len = TREE_VEC_LENGTH (expanded_specs);
		else
		  {
		    /* We're substituting into a member template, so
		       we got a TYPE_PACK_EXPANSION back.  Add that
		       expansion and move on.  */
		    gcc_assert (TREE_CODE (expanded_specs)
				== TYPE_PACK_EXPANSION);
		    new_specs = add_exception_specifier (new_specs,
							 expanded_specs,
							 complain);
		    specs = TREE_CHAIN (specs);
		    continue;
		  }
	      }

	    for (i = 0; i < len; ++i)
	      {
		if (expanded_specs)
		  spec = TREE_VEC_ELT (expanded_specs, i);
		else
		  spec = tsubst (TREE_VALUE (specs), args, complain, in_decl);
		if (spec == error_mark_node)
		  return spec;
		new_specs = add_exception_specifier (new_specs, spec,
						     complain);
	      }

	    specs = TREE_CHAIN (specs);
	  }
    }
  return new_specs;
}

/* Substitute through a TREE_LIST of types or expressions, handling pack
   expansions.  */

tree
tsubst_tree_list (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  /* The terminating void_list_node is shared; never rebuild it.  */
  if (t == void_list_node)
    return t;

  tree purpose = TREE_PURPOSE (t);
  tree purposevec = NULL_TREE;
  if (!purpose)
    ;
  else if (PACK_EXPANSION_P (purpose))
    {
      purpose = tsubst_pack_expansion (purpose, args, complain, in_decl);
      if (TREE_CODE (purpose) == TREE_VEC)
	purposevec = purpose;
    }
  else if (TYPE_P (purpose))
    purpose = tsubst (purpose, args, complain, in_decl);
  else
    purpose = tsubst_copy_and_build (purpose, args, complain, in_decl);
  if (purpose == error_mark_node || purposevec == error_mark_node)
    return error_mark_node;

  tree value = TREE_VALUE (t);
  tree valuevec = NULL_TREE;
  if (!value)
    ;
  else if (PACK_EXPANSION_P (value))
    {
      value = tsubst_pack_expansion (value, args, complain, in_decl);
      if (TREE_CODE (value) == TREE_VEC)
	valuevec = value;
    }
  else if (TYPE_P (value))
    value = tsubst (value, args, complain, in_decl);
  else
    value = tsubst_copy_and_build (value, args, complain, in_decl);
  if (value == error_mark_node || valuevec == error_mark_node)
    return error_mark_node;

  tree chain = TREE_CHAIN (t);
  if (!chain)
    ;
  else if (TREE_CODE (chain) == TREE_LIST)
    chain = tsubst_tree_list (chain, args, complain, in_decl);
  else if (TYPE_P (chain))
    chain = tsubst (chain, args, complain, in_decl);
  else
    chain = tsubst_copy_and_build (chain, args, complain, in_decl);
  if (chain == error_mark_node)
    return error_mark_node;

  /* Structural sharing: if substitution changed nothing, reuse T.  */
  if (purpose == TREE_PURPOSE (t)
      && value == TREE_VALUE (t)
      && chain == TREE_CHAIN (t))
    return t;

  int len;
  /* Determine the number of arguments.  */
  if (purposevec)
    {
      len = TREE_VEC_LENGTH (purposevec);
      gcc_assert (!valuevec || len == TREE_VEC_LENGTH (valuevec));
    }
  else if (valuevec)
    len = TREE_VEC_LENGTH (valuevec);
  else
    len = 1;

  /* Cons the (possibly pack-expanded) elements onto CHAIN back to front
     so the resulting list preserves the original order.  */
  for (int i = len; i-- > 0; )
    {
      if (purposevec)
	purpose = TREE_VEC_ELT (purposevec, i);
      if (valuevec)
	value = TREE_VEC_ELT (valuevec, i);

      if (value && TYPE_P (value))
	chain = hash_tree_cons (purpose, value, chain);
      else
	chain = tree_cons (purpose, value, chain);
    }

  return chain;
}

/* Take the tree structure T and replace template parameters used
   therein with the argument vector ARGS.  IN_DECL is an associated
   decl for diagnostics.  If an error occurs, returns ERROR_MARK_NODE.
   Issue error and warning messages under control of COMPLAIN.

   Note that we must be relatively non-tolerant of extensions here, in
   order to preserve conformance; if we allow substitutions that should
   not be allowed, we may allow argument deductions that should not
   succeed, and therefore report ambiguous overload situations where
   there are none.  In theory, we could allow the substitution, but
   indicate that it should have failed, and allow our caller to make
   sure that the right thing happens, but we don't try to do this yet.

   This function is used for dealing with types, decls and the like;
   for expressions, use tsubst_expr or tsubst_copy.
*/

tree
tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  enum tree_code code;
  tree type, r = NULL_TREE;

  /* Fast paths: nodes that can never contain template parameters are
     returned unchanged.  */
  if (t == NULL_TREE || t == error_mark_node
      || t == integer_type_node
      || t == void_type_node
      || t == char_type_node
      || t == unknown_type_node
      || TREE_CODE (t) == NAMESPACE_DECL
      || TREE_CODE (t) == TRANSLATION_UNIT_DECL)
    return t;

  if (DECL_P (t))
    return tsubst_decl (t, args, complain);

  if (args == NULL_TREE)
    return t;

  code = TREE_CODE (t);

  if (code == IDENTIFIER_NODE)
    type = IDENTIFIER_TYPE_VALUE (t);
  else
    type = TREE_TYPE (t);

  gcc_assert (type != unknown_type_node);

  /* Reuse typedefs.  We need to do this to handle dependent attributes,
     such as attribute aligned.  */
  if (TYPE_P (t)
      && typedef_variant_p (t))
    {
      tree decl = TYPE_NAME (t);

      if (alias_template_specialization_p (t, nt_opaque))
	{
	  /* DECL represents an alias template and we want to
	     instantiate it.  */
	  tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl));
	  tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl);
	  r = instantiate_alias_template (tmpl, gen_args, complain);
	}
      else if (DECL_CLASS_SCOPE_P (decl)
	       && CLASSTYPE_TEMPLATE_INFO (DECL_CONTEXT (decl))
	       && uses_template_parms (DECL_CONTEXT (decl)))
	{
	  tree tmpl = most_general_template (DECL_TI_TEMPLATE (decl));
	  tree gen_args = tsubst (DECL_TI_ARGS (decl), args, complain, in_decl);
	  r = retrieve_specialization (tmpl, gen_args, 0);
	}
      else if (DECL_FUNCTION_SCOPE_P (decl)
	       && DECL_TEMPLATE_INFO (DECL_CONTEXT (decl))
	       && uses_template_parms (DECL_TI_ARGS (DECL_CONTEXT (decl))))
	r = retrieve_local_specialization (decl);
      else
	/* The typedef is from a non-template context.  */
	return t;

      if (r)
	{
	  r = TREE_TYPE (r);
	  r = cp_build_qualified_type_real
	    (r, cp_type_quals (t) | cp_type_quals (r),
	     complain | tf_ignore_bad_quals);
	  return r;
	}
      else
	{
	  /* We don't have an instantiation yet, so drop the typedef.  */
	  int quals = cp_type_quals (t);
	  t = DECL_ORIGINAL_TYPE (decl);
	  t = cp_build_qualified_type_real (t, quals,
					    complain | tf_ignore_bad_quals);
	}
    }

  /* tf_fndecl_type is only meaningful at this level (it requests
     deferral of noexcept instantiation for FUNCTION/METHOD_TYPE below);
     strip it before recursing.  */
  bool fndecl_type = (complain & tf_fndecl_type);
  complain &= ~tf_fndecl_type;

  /* Pre-substitute TREE_TYPE, except for codes whose handlers need the
     unsubstituted type (or whose TREE_TYPE is not a type to substitute).  */
  if (type
      && code != TYPENAME_TYPE
      && code != TEMPLATE_TYPE_PARM
      && code != TEMPLATE_PARM_INDEX
      && code != IDENTIFIER_NODE
      && code != FUNCTION_TYPE
      && code != METHOD_TYPE)
    type = tsubst (type, args, complain, in_decl);
  if (type == error_mark_node)
    return error_mark_node;

  switch (code)
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case ENUMERAL_TYPE:
      return tsubst_aggr_type (t, args, complain, in_decl,
			       /*entering_scope=*/0);

    case ERROR_MARK:
    case IDENTIFIER_NODE:
    case VOID_TYPE:
    case REAL_TYPE:
    case COMPLEX_TYPE:
    case VECTOR_TYPE:
    case BOOLEAN_TYPE:
    case NULLPTR_TYPE:
    case LANG_TYPE:
      /* These contain no template parameters; return unchanged.  */
      return t;

    case INTEGER_TYPE:
      if (t == integer_type_node)
	return t;

      if (TREE_CODE (TYPE_MIN_VALUE (t)) == INTEGER_CST
	  && TREE_CODE (TYPE_MAX_VALUE (t)) == INTEGER_CST)
	return t;

      {
	/* Dependent array-index type: re-substitute its bound.  */
	tree max, omax = TREE_OPERAND (TYPE_MAX_VALUE (t), 0);

	max = tsubst_expr (omax, args, complain, in_decl,
			   /*integral_constant_expression_p=*/false);

	/* Fix up type of the magic NOP_EXPR with TREE_SIDE_EFFECTS if
	   needed.  */
	if (TREE_CODE (max) == NOP_EXPR
	    && TREE_SIDE_EFFECTS (omax)
	    && !TREE_TYPE (max))
	  TREE_TYPE (max) = TREE_TYPE (TREE_OPERAND (max, 0));

	/* If we're in a partial instantiation, preserve the magic NOP_EXPR
	   with TREE_SIDE_EFFECTS that indicates this is not an integral
	   constant expression.  */
	if (processing_template_decl
	    && TREE_SIDE_EFFECTS (omax) && TREE_CODE (omax) == NOP_EXPR)
	  {
	    gcc_assert (TREE_CODE (max) == NOP_EXPR);
	    TREE_SIDE_EFFECTS (max) = 1;
	  }

	return compute_array_index_type (NULL_TREE, max, complain);
      }

    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_PARM_INDEX:
      {
	int idx;
	int level;
	int levels;
	tree arg = NULL_TREE;

	r = NULL_TREE;

	gcc_assert (TREE_VEC_LENGTH (args) > 0);
	template_parm_level_and_index (t, &level, &idx);

	levels = TMPL_ARGS_DEPTH (args);
	if (level <= levels
	    && TREE_VEC_LENGTH (TMPL_ARGS_LEVEL (args, level)) > 0)
	  {
	    arg = TMPL_ARG (args, level, idx);

	    /* See through ARGUMENT_PACK_SELECT arguments.  */
	    if (arg && TREE_CODE (arg) == ARGUMENT_PACK_SELECT)
	      arg = argument_pack_select_arg (arg);
	  }

	if (arg == error_mark_node)
	  return error_mark_node;
	else if (arg != NULL_TREE)
	  {
	    if (ARGUMENT_PACK_P (arg))
	      /* If ARG is an argument pack, we don't actually want to
		 perform a substitution here, because substitutions
		 for argument packs are only done
		 element-by-element.  We can get to this point when
		 substituting the type of a non-type template
		 parameter pack, when that type actually contains
		 template parameter packs from an outer template, e.g.,

		 template<typename... Types> struct A {
		   template<Types... Values> struct B { };
		 };  */
	      return t;

	    if (code == TEMPLATE_TYPE_PARM)
	      {
		int quals;

		/* When building concept checks for the purpose of
		   deducing placeholders, we can end up with wildcards
		   where types are expected.  Adjust this to the deduced
		   value.  */
		if (TREE_CODE (arg) == WILDCARD_DECL)
		  arg = TREE_TYPE (TREE_TYPE (arg));

		gcc_assert (TYPE_P (arg));

		quals = cp_type_quals (arg) | cp_type_quals (t);

		return cp_build_qualified_type_real
		  (arg, quals, complain | tf_ignore_bad_quals);
	      }
	    else if (code == BOUND_TEMPLATE_TEMPLATE_PARM)
	      {
		/* We are processing a type constructed from a
		   template template parameter.  */
		tree argvec = tsubst (TYPE_TI_ARGS (t),
				      args, complain, in_decl);
		if (argvec == error_mark_node)
		  return error_mark_node;

		gcc_assert (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM
			    || TREE_CODE (arg) == TEMPLATE_DECL
			    || TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE);

		if (TREE_CODE (arg) == UNBOUND_CLASS_TEMPLATE)
		  /* Consider this code:

		       template <template <class> class Template>
		       struct Internal {
			 template <class Arg> using Bind = Template<Arg>;
		       };

		       template <template <class> class Template, class Arg>
		       using Instantiate = Template<Arg>; //#0

		       template <template <class> class Template,
				 class Argument>
		       using Bind =
			 Instantiate<Internal<Template>::template Bind,
				     Argument>; //#1

		     When #1 is parsed, the BOUND_TEMPLATE_TEMPLATE_PARM
		     representing the parameter `Template' in #0 matches the
		     UNBOUND_CLASS_TEMPLATE representing the argument
		     `Internal<Template>::template Bind'; We then want to
		     assemble the type `Bind<Argument>' that can't be fully
		     created right now, because `Internal<Template>' not
		     being complete, the Bind template cannot be looked up
		     in that context.  So we need to "store" `Bind<Argument>'
		     for later when the context of Bind becomes complete.
		     Let's store that in a TYPENAME_TYPE.  */
		  return make_typename_type (TYPE_CONTEXT (arg),
					     build_nt (TEMPLATE_ID_EXPR,
						       TYPE_IDENTIFIER (arg),
						       argvec),
					     typename_type,
					     complain);

		/* We can get a TEMPLATE_TEMPLATE_PARM here when we
		   are resolving nested-types in the signature of a
		   member function templates.  Otherwise ARG is a
		   TEMPLATE_DECL and is the real template to be
		   instantiated.  */
		if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM)
		  arg = TYPE_NAME (arg);

		r = lookup_template_class (arg,
					   argvec, in_decl,
					   DECL_CONTEXT (arg),
					   /*entering_scope=*/0,
					   complain);
		return cp_build_qualified_type_real
		  (r, cp_type_quals (t) | cp_type_quals (r), complain);
	      }
	    else if (code == TEMPLATE_TEMPLATE_PARM)
	      return arg;
	    else
	      /* TEMPLATE_PARM_INDEX.  */
	      return convert_from_reference (unshare_expr (arg));
	  }

	if (level == 1)
	  /* This can happen during the attempted tsubst'ing in
	     unify.  This means that we don't yet have any information
	     about the template parameter in question.  */
	  return t;

	/* Early in template argument deduction substitution, we don't
	   want to reduce the level of 'auto', or it will be confused
	   with a normal template parm in subsequent deduction.
	   Similarly, don't reduce the level of template parameters to
	   avoid mismatches when deducing their types.  */
	if (complain & tf_partial)
	  return t;

	/* If we get here, we must have been looking at a parm for a
	   more deeply nested template.  Make a new version of this
	   template parameter, but with a lower level.  */
	switch (code)
	  {
	  case TEMPLATE_TYPE_PARM:
	  case TEMPLATE_TEMPLATE_PARM:
	  case BOUND_TEMPLATE_TEMPLATE_PARM:
	    if (cp_type_quals (t))
	      {
		/* Lower the unqualified variant, then re-apply quals.  */
		r = tsubst (TYPE_MAIN_VARIANT (t), args, complain, in_decl);
		r = cp_build_qualified_type_real
		  (r, cp_type_quals (t),
		   complain | (code == TEMPLATE_TYPE_PARM
			       ? tf_ignore_bad_quals : 0));
	      }
	    else if (TREE_CODE (t) == TEMPLATE_TYPE_PARM
		     && PLACEHOLDER_TYPE_CONSTRAINTS (t)
		     && (r = (TEMPLATE_PARM_DESCENDANTS
			      (TEMPLATE_TYPE_PARM_INDEX (t))))
		     && (r = TREE_TYPE (r))
		     && !PLACEHOLDER_TYPE_CONSTRAINTS (r))
	      /* Break infinite recursion when substituting the constraints
		 of a constrained placeholder.  */;
	    else if (TREE_CODE (t) == TEMPLATE_TYPE_PARM
		     && !PLACEHOLDER_TYPE_CONSTRAINTS (t)
		     && !CLASS_PLACEHOLDER_TEMPLATE (t)
		     && (arg = TEMPLATE_TYPE_PARM_INDEX (t),
			 r = TEMPLATE_PARM_DESCENDANTS (arg))
		     && (TEMPLATE_PARM_LEVEL (r)
			 == TEMPLATE_PARM_LEVEL (arg) - levels))
	      /* Cache the simple case of lowering a type parameter.  */
	      r = TREE_TYPE (r);
	    else
	      {
		r = copy_type (t);
		TEMPLATE_TYPE_PARM_INDEX (r)
		  = reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (t),
						r, levels, args, complain);
		TYPE_STUB_DECL (r) = TYPE_NAME (r) = TEMPLATE_TYPE_DECL (r);
		TYPE_MAIN_VARIANT (r) = r;
		TYPE_POINTER_TO (r) = NULL_TREE;
		TYPE_REFERENCE_TO (r) = NULL_TREE;

		if (TREE_CODE (t) == TEMPLATE_TYPE_PARM)
		  {
		    /* Propagate constraints on placeholders since they are
		       only instantiated during satisfaction.  */
		    if (tree constr = PLACEHOLDER_TYPE_CONSTRAINTS (t))
		      PLACEHOLDER_TYPE_CONSTRAINTS (r) = constr;
		    else if (tree pl = CLASS_PLACEHOLDER_TEMPLATE (t))
		      {
			pl = tsubst_copy (pl, args, complain, in_decl);
			CLASS_PLACEHOLDER_TEMPLATE (r) = pl;
		      }
		  }

		if (TREE_CODE (r) == TEMPLATE_TEMPLATE_PARM)
		  /* We have reduced the level of the template
		     template parameter, but not the levels of its
		     template parameters, so canonical_type_parameter
		     will not be able to find the canonical template
		     template parameter for this level.  Thus, we
		     require structural equality checking to compare
		     TEMPLATE_TEMPLATE_PARMs.  */
		  SET_TYPE_STRUCTURAL_EQUALITY (r);
		else if (TYPE_STRUCTURAL_EQUALITY_P (t))
		  SET_TYPE_STRUCTURAL_EQUALITY (r);
		else
		  TYPE_CANONICAL (r) = canonical_type_parameter (r);

		if (code == BOUND_TEMPLATE_TEMPLATE_PARM)
		  {
		    tree tinfo = TYPE_TEMPLATE_INFO (t);
		    /* We might need to substitute into the types of non-type
		       template parameters.  */
		    tree tmpl = tsubst (TI_TEMPLATE (tinfo), args,
					complain, in_decl);
		    if (tmpl == error_mark_node)
		      return error_mark_node;
		    tree argvec = tsubst (TI_ARGS (tinfo), args,
					  complain, in_decl);
		    if (argvec == error_mark_node)
		      return error_mark_node;

		    TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (r)
		      = build_template_info (tmpl, argvec);
		  }
	      }
	    break;

	  case TEMPLATE_PARM_INDEX:
	    /* OK, now substitute the type of the non-type parameter.  We
	       couldn't do it earlier because it might be an auto parameter,
	       and we wouldn't need to if we had an argument.  */
	    type = tsubst (type, args, complain, in_decl);
	    if (type == error_mark_node)
	      return error_mark_node;
	    r = reduce_template_parm_level (t, type, levels, args, complain);
	    break;

	  default:
	    gcc_unreachable ();
	  }

	return r;
      }

    case TREE_LIST:
      return tsubst_tree_list (t, args, complain, in_decl);

    case TREE_BINFO:
      /* We should never be tsubsting a binfo.  */
      gcc_unreachable ();

    case TREE_VEC:
      /* A vector of template arguments.  */
      gcc_assert (!type);
      return tsubst_template_args (t, args, complain, in_decl);

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
	if (type == TREE_TYPE (t) && TREE_CODE (type) != METHOD_TYPE)
	  return t;

	/* [temp.deduct]

	   Type deduction may fail for any of the following
	   reasons:

	   -- Attempting to create a pointer to reference type.
	   -- Attempting to create a reference to a reference type or
	      a reference to void.

	   Core issue 106 says that creating a reference to a reference
	   during instantiation is no longer a cause for failure.  We
	   only enforce this check in strict C++98 mode.  */
	if ((TYPE_REF_P (type)
	     && (((cxx_dialect == cxx98) && flag_iso)
		 || code != REFERENCE_TYPE))
	    || (code == REFERENCE_TYPE && VOID_TYPE_P (type)))
	  {
	    static location_t last_loc;

	    /* We keep track of the last time we issued this error
	       message to avoid spewing a ton of messages during a
	       single bad template instantiation.  */
	    if (complain & tf_error
		&& last_loc != input_location)
	      {
		if (VOID_TYPE_P (type))
		  error ("forming reference to void");
		else if (code == POINTER_TYPE)
		  error ("forming pointer to reference type %qT", type);
		else
		  error ("forming reference to reference type %qT", type);
		last_loc = input_location;
	      }

	    return error_mark_node;
	  }
	else if (TREE_CODE (type) == FUNCTION_TYPE
		 && (type_memfn_quals (type) != TYPE_UNQUALIFIED
		     || type_memfn_rqual (type) != REF_QUAL_NONE))
	  {
	    if (complain & tf_error)
	      {
		if (code == POINTER_TYPE)
		  error ("forming pointer to qualified function type %qT",
			 type);
		else
		  error ("forming reference to qualified function type %qT",
			 type);
	      }
	    return error_mark_node;
	  }
	else if (code == POINTER_TYPE)
	  {
	    r = build_pointer_type (type);
	    if (TREE_CODE (type) == METHOD_TYPE)
	      r = build_ptrmemfunc_type (r);
	  }
	else if (TYPE_REF_P (type))
	  /* In C++0x, during template argument substitution, when there is an
	     attempt to create a reference to a reference type, reference
	     collapsing is applied as described in [14.3.1/4 temp.arg.type]:

	     "If a template-argument for a template-parameter T names a type
	     that is a reference to a type A, an attempt to create the type
	     'lvalue reference to cv T' creates the type 'lvalue reference to
	     A,' while an attempt to create the type type rvalue reference to
	     cv T' creates the type T"  */
	  r = cp_build_reference_type
	      (TREE_TYPE (type),
	       TYPE_REF_IS_RVALUE (t) && TYPE_REF_IS_RVALUE (type));
	else
	  r = cp_build_reference_type (type, TYPE_REF_IS_RVALUE (t));
	r = cp_build_qualified_type_real (r, cp_type_quals (t), complain);

	if (r != error_mark_node)
	  /* Will this ever be needed for TYPE_..._TO values?  */
	  layout_type (r);

	return r;
      }
    case OFFSET_TYPE:
      {
	r = tsubst (TYPE_OFFSET_BASETYPE (t), args, complain, in_decl);
	if (r == error_mark_node || !MAYBE_CLASS_TYPE_P (r))
	  {
	    /* [temp.deduct]

	       Type deduction may fail for any of the following
	       reasons:

	       -- Attempting to create "pointer to member of T" when T
		  is not a class type.  */
	    if (complain & tf_error)
	      error ("creating pointer to member of non-class type %qT", r);
	    return error_mark_node;
	  }
	if (TYPE_REF_P (type))
	  {
	    if (complain & tf_error)
	      error ("creating pointer to member reference type %qT", type);
	    return error_mark_node;
	  }
	if (VOID_TYPE_P (type))
	  {
	    if (complain & tf_error)
	      error ("creating pointer to member of type void");
	    return error_mark_node;
	  }
	gcc_assert (TREE_CODE (type) != METHOD_TYPE);
	if (TREE_CODE (type) == FUNCTION_TYPE)
	  {
	    /* The type of the implicit object parameter gets its
	       cv-qualifiers from the FUNCTION_TYPE.  */
	    tree memptr;
	    tree method_type
	      = build_memfn_type (type, r, type_memfn_quals (type),
				  type_memfn_rqual (type));
	    memptr = build_ptrmemfunc_type (build_pointer_type (method_type));
	    return cp_build_qualified_type_real (memptr, cp_type_quals (t),
						 complain);
	  }
	else
	  return cp_build_qualified_type_real (build_ptrmem_type (r, type),
					       cp_type_quals (t),
					       complain);
      }
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      {
	tree fntype;
	tree specs;
	fntype = tsubst_function_type (t, args, complain, in_decl);
	if (fntype == error_mark_node)
	  return error_mark_node;

	/* Substitute the exception specification.  Deferral is only
	   allowed when the caller passed tf_fndecl_type (see above).  */
	specs = tsubst_exception_specification (t, args, complain, in_decl,
						/*defer_ok*/fndecl_type);
	if (specs == error_mark_node)
	  return error_mark_node;
	if (specs)
	  fntype = build_exception_variant (fntype, specs);
	return fntype;
      }
    case ARRAY_TYPE:
      {
	tree domain = tsubst (TYPE_DOMAIN (t), args, complain, in_decl);
	if (domain == error_mark_node)
	  return error_mark_node;

	/* As an optimization, we avoid regenerating the array type if
	   it will obviously be the same as T.  */
	if (type == TREE_TYPE (t) && domain == TYPE_DOMAIN (t))
	  return t;

	/* These checks should match the ones in create_array_type_for_decl.

	   [temp.deduct]

	   The deduction may fail for any of the following reasons:

	   -- Attempting to create an array with an element type that
	      is void, a function type, or a reference type, or [DR337]
	      an abstract class type.  */
	if (VOID_TYPE_P (type)
	    || TREE_CODE (type) == FUNCTION_TYPE
	    || (TREE_CODE (type) == ARRAY_TYPE
		&& TYPE_DOMAIN (type) == NULL_TREE)
	    || TYPE_REF_P (type))
	  {
	    if (complain & tf_error)
	      error ("creating array of %qT", type);
	    return error_mark_node;
	  }

	if (abstract_virtuals_error_sfinae (ACU_ARRAY, type, complain))
	  return error_mark_node;

	r = build_cplus_array_type (type, domain);

	if (!valid_array_size_p (input_location, r, in_decl,
				 (complain & tf_error)))
	  return error_mark_node;

	if (TYPE_USER_ALIGN (t))
	  {
	    SET_TYPE_ALIGN (r, TYPE_ALIGN (t));
	    TYPE_USER_ALIGN (r) = 1;
	  }

	return r;
      }
    case TYPENAME_TYPE:
      {
	tree ctx = TYPE_CONTEXT (t);
	if (TREE_CODE (ctx) == TYPE_PACK_EXPANSION)
	  {
	    ctx = tsubst_pack_expansion (ctx, args, complain, in_decl);
	    /* The qualifying scope must expand to exactly one element.  */
	    if (ctx == error_mark_node
		|| TREE_VEC_LENGTH (ctx) > 1)
	      return error_mark_node;
	    if (TREE_VEC_LENGTH (ctx) == 0)
	      {
		if (complain & tf_error)
		  error ("%qD is instantiated for an empty pack",
			 TYPENAME_TYPE_FULLNAME (t));
		return error_mark_node;
	      }
	    ctx = TREE_VEC_ELT (ctx, 0);
	  }
	else
	  ctx = tsubst_aggr_type (ctx, args, complain, in_decl,
				  /*entering_scope=*/1);
	if (ctx == error_mark_node)
	  return error_mark_node;

	tree f = tsubst_copy (TYPENAME_TYPE_FULLNAME (t), args,
			      complain, in_decl);
	if (f == error_mark_node)
	  return error_mark_node;

	if (!MAYBE_CLASS_TYPE_P (ctx))
	  {
	    if (complain & tf_error)
	      error ("%qT is not a class, struct, or union type", ctx);
	    return error_mark_node;
	  }
	else if (!uses_template_parms (ctx) && !TYPE_BEING_DEFINED (ctx))
	  {
	    /* Normally, make_typename_type does not require that the CTX
	       have complete type in order to allow things like:

		 template <class T> struct S { typename S<T>::X Y; };

	       But, such constructs have already been resolved by this
	       point, so here CTX really should have complete type, unless
	       it's a partial instantiation.  */
	    ctx = complete_type (ctx);
	    if (!COMPLETE_TYPE_P (ctx))
	      {
		if (complain & tf_error)
		  cxx_incomplete_type_error (NULL_TREE, ctx);
		return error_mark_node;
	      }
	  }

	f = make_typename_type (ctx, f, typename_type,
				complain | tf_keep_type_decl);
	if (f == error_mark_node)
	  return f;
	if (TREE_CODE (f) == TYPE_DECL)
	  {
	    complain |= tf_ignore_bad_quals;
	    f = TREE_TYPE (f);
	  }

	if (TREE_CODE (f) != TYPENAME_TYPE)
	  {
	    /* Enforce the elaborated-type-specifier (enum/class) that
	       introduced this TYPENAME_TYPE, if any.  */
	    if (TYPENAME_IS_ENUM_P (t) && TREE_CODE (f) != ENUMERAL_TYPE)
	      {
		if (complain & tf_error)
		  error ("%qT resolves to %qT, which is not an enumeration type",
			 t, f);
		else
		  return error_mark_node;
	      }
	    else if (TYPENAME_IS_CLASS_P (t) && !CLASS_TYPE_P (f))
	      {
		if (complain & tf_error)
		  error ("%qT resolves to %qT, which is not a class type",
			 t, f);
		else
		  return error_mark_node;
	      }
	  }

	return cp_build_qualified_type_real
	  (f, cp_type_quals (f) | cp_type_quals (t), complain);
      }
    case UNBOUND_CLASS_TEMPLATE:
      {
	tree ctx = tsubst_aggr_type (TYPE_CONTEXT (t), args, complain,
				     in_decl, /*entering_scope=*/1);
	tree name = TYPE_IDENTIFIER (t);
	tree parm_list = DECL_TEMPLATE_PARMS (TYPE_NAME (t));

	if (ctx == error_mark_node || name == error_mark_node)
	  return error_mark_node;

	if (parm_list)
	  parm_list = tsubst_template_parms (parm_list, args, complain);
	return make_unbound_class_template (ctx, name, parm_list, complain);
      }
    case TYPEOF_TYPE:
      {
	tree type;

	/* The typeof operand is unevaluated.  */
	++cp_unevaluated_operand;
	++c_inhibit_evaluation_warnings;

	type = tsubst_expr (TYPEOF_TYPE_EXPR (t), args,
			    complain, in_decl,
			    /*integral_constant_expression_p=*/false);

	--cp_unevaluated_operand;
	--c_inhibit_evaluation_warnings;

	type = finish_typeof (type);
	return cp_build_qualified_type_real (type,
					     cp_type_quals (t)
					     | cp_type_quals (type),
					     complain);
      }
    case DECLTYPE_TYPE:
      {
	tree type;

	/* The decltype operand is unevaluated.  */
	++cp_unevaluated_operand;
	++c_inhibit_evaluation_warnings;

	type = tsubst_copy_and_build (DECLTYPE_TYPE_EXPR (t), args,
				      complain|tf_decltype, in_decl,
				      /*function_p*/false,
				      /*integral_constant_expression*/false);

	--cp_unevaluated_operand;
	--c_inhibit_evaluation_warnings;

	if (DECLTYPE_FOR_LAMBDA_CAPTURE (t))
	  type = lambda_capture_field_type (type,
					    false /*explicit_init*/,
					    DECLTYPE_FOR_REF_CAPTURE (t));
	else if (DECLTYPE_FOR_LAMBDA_PROXY (t))
	  type = lambda_proxy_type (type);
	else
	  {
	    bool id = DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (t);
	    if (id && TREE_CODE (DECLTYPE_TYPE_EXPR (t)) == BIT_NOT_EXPR
		&& EXPR_P (type))
	      /* In a template ~id could be either a complement expression
		 or an unqualified-id naming a destructor; if instantiating
		 it produces an expression, it's not an id-expression or
		 member access.  */
	      id = false;
	    type = finish_decltype_type (type, id, complain);
	  }
	return cp_build_qualified_type_real (type, cp_type_quals (t)
					     | cp_type_quals (type),
					     complain | tf_ignore_bad_quals);
      }
    case UNDERLYING_TYPE:
      {
	tree type = tsubst (UNDERLYING_TYPE_TYPE (t), args,
			    complain, in_decl);
	return finish_underlying_type (type);
      }
    case TYPE_ARGUMENT_PACK:
    case NONTYPE_ARGUMENT_PACK:
      {
	tree r;

	if (code == NONTYPE_ARGUMENT_PACK)
	  r = make_node (code);
	else
	  r = cxx_make_type (code);

	tree pack_args = ARGUMENT_PACK_ARGS (t);
	pack_args = tsubst_template_args (pack_args, args, complain, in_decl);
	SET_ARGUMENT_PACK_ARGS (r, pack_args);

	return r;
      }
    case VOID_CST:
    case INTEGER_CST:
    case REAL_CST:
    case STRING_CST:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
    case NOP_EXPR:
    case INDIRECT_REF:
    case ADDR_EXPR:
    case CALL_EXPR:
    case ARRAY_REF:
    case SCOPE_REF:
      /* We should use one of the expression tsubsts for these codes.  */
      gcc_unreachable ();

    default:
      sorry ("use of %qs in template", get_tree_code_name (code));
      return error_mark_node;
    }
}

/* tsubst a BASELINK.  OBJECT_TYPE, if non-NULL, is the type of the
   expression on the left-hand side of the "." or "->" operator.  We
   only do the lookup if we had a dependent BASELINK.  Otherwise we
   adjust it onto the instantiated hierarchy.
*/

static tree
tsubst_baselink (tree baselink, tree object_type,
		 tree args, tsubst_flags_t complain, tree in_decl)
{
  bool qualified_p = BASELINK_QUALIFIED_P (baselink);
  tree qualifying_scope = BINFO_TYPE (BASELINK_ACCESS_BINFO (baselink));
  qualifying_scope = tsubst (qualifying_scope, args, complain, in_decl);

  tree optype = BASELINK_OPTYPE (baselink);
  optype = tsubst (optype, args, complain, in_decl);

  tree template_args = NULL_TREE;
  bool template_id_p = false;
  tree fns = BASELINK_FUNCTIONS (baselink);
  if (TREE_CODE (fns) == TEMPLATE_ID_EXPR)
    {
      /* Peel off explicit template arguments; they are re-attached after
	 any fresh lookup below.  */
      template_id_p = true;
      template_args = TREE_OPERAND (fns, 1);
      fns = TREE_OPERAND (fns, 0);
      if (template_args)
	template_args = tsubst_template_args (template_args, args,
					      complain, in_decl);
    }

  /* The BASELINK is dependent iff substitution changed the binfo type.  */
  tree binfo_type = BINFO_TYPE (BASELINK_BINFO (baselink));
  binfo_type = tsubst (binfo_type, args, complain, in_decl);
  bool dependent_p = binfo_type != BINFO_TYPE (BASELINK_BINFO (baselink));

  if (dependent_p)
    {
      /* Re-do the member lookup in the substituted scope.  */
      tree name = OVL_NAME (fns);
      if (IDENTIFIER_CONV_OP_P (name))
	name = make_conv_op_name (optype);

      if (name == complete_dtor_identifier)
	/* Treat as-if non-dependent below.  */
	dependent_p = false;

      baselink = lookup_fnfields (qualifying_scope, name, /*protect=*/1,
				  complain);
      if (!baselink)
	{
	  if ((complain & tf_error)
	      && constructor_name_p (name, qualifying_scope))
	    error ("cannot call constructor %<%T::%D%> directly",
		   qualifying_scope, name);
	  return error_mark_node;
	}

      if (BASELINK_P (baselink))
	fns = BASELINK_FUNCTIONS (baselink);
    }
  else
    /* We're going to overwrite pieces below, make a duplicate.  */
    baselink = copy_node (baselink);

  /* If lookup found a single function, mark it as used at this point.
     (If lookup found multiple functions the one selected later by
     overload resolution will be marked as used at that point.)  */
  if (!template_id_p && !really_overloaded_fn (fns))
    {
      tree fn = OVL_FIRST (fns);
      bool ok = mark_used (fn, complain);
      if (!ok && !(complain & tf_error))
	return error_mark_node;
      if (ok && BASELINK_P (baselink))
	/* We might have instantiated an auto function.  */
	TREE_TYPE (baselink) = TREE_TYPE (fn);
    }

  if (BASELINK_P (baselink))
    {
      /* Add back the template arguments, if present.  */
      if (template_id_p)
	BASELINK_FUNCTIONS (baselink)
	  = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fns, template_args);

      /* Update the conversion operator type.  */
      BASELINK_OPTYPE (baselink) = optype;
    }

  if (!object_type)
    object_type = current_class_type;

  if (qualified_p || !dependent_p)
    {
      baselink = adjust_result_of_qualified_name_lookup (baselink,
							 qualifying_scope,
							 object_type);
      if (!qualified_p)
	/* We need to call adjust_result_of_qualified_name_lookup in case the
	   destructor names a base class, but we unset BASELINK_QUALIFIED_P
	   so that we still get virtual function binding.  */
	BASELINK_QUALIFIED_P (baselink) = false;
    }

  return baselink;
}

/* Like tsubst_expr for a SCOPE_REF, given by QUALIFIED_ID.  DONE is
   true if the qualified-id will be a postfix-expression in-and-of
   itself; false if more of the postfix-expression follows the
   QUALIFIED_ID.  ADDRESS_P is true if the qualified-id is the operand
   of "&".  */

static tree
tsubst_qualified_id (tree qualified_id, tree args,
		     tsubst_flags_t complain, tree in_decl,
		     bool done, bool address_p)
{
  tree expr;
  tree scope;
  tree name;
  bool is_template;
  tree template_args;
  location_t loc = UNKNOWN_LOCATION;

  gcc_assert (TREE_CODE (qualified_id) == SCOPE_REF);

  /* Figure out what name to look up.  */
*/ name = TREE_OPERAND (qualified_id, 1); if (TREE_CODE (name) == TEMPLATE_ID_EXPR) { is_template = true; loc = EXPR_LOCATION (name); template_args = TREE_OPERAND (name, 1); if (template_args) template_args = tsubst_template_args (template_args, args, complain, in_decl); if (template_args == error_mark_node) return error_mark_node; name = TREE_OPERAND (name, 0); } else { is_template = false; template_args = NULL_TREE; } /* Substitute into the qualifying scope. When there are no ARGS, we are just trying to simplify a non-dependent expression. In that case the qualifying scope may be dependent, and, in any case, substituting will not help. */ scope = TREE_OPERAND (qualified_id, 0); if (args) { scope = tsubst (scope, args, complain, in_decl); expr = tsubst_copy (name, args, complain, in_decl); } else expr = name; if (dependent_scope_p (scope)) { if (is_template) expr = build_min_nt_loc (loc, TEMPLATE_ID_EXPR, expr, template_args); tree r = build_qualified_name (NULL_TREE, scope, expr, QUALIFIED_NAME_IS_TEMPLATE (qualified_id)); REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (qualified_id); return r; } if (!BASELINK_P (name) && !DECL_P (expr)) { if (TREE_CODE (expr) == BIT_NOT_EXPR) { /* A BIT_NOT_EXPR is used to represent a destructor. */ if (!check_dtor_name (scope, TREE_OPERAND (expr, 0))) { error ("qualifying type %qT does not match destructor name ~%qT", scope, TREE_OPERAND (expr, 0)); expr = error_mark_node; } else expr = lookup_qualified_name (scope, complete_dtor_identifier, LOOK_want::NORMAL, false); } else expr = lookup_qualified_name (scope, expr, LOOK_want::NORMAL, false); if (TREE_CODE (TREE_CODE (expr) == TEMPLATE_DECL ? 
DECL_TEMPLATE_RESULT (expr) : expr) == TYPE_DECL) { if (complain & tf_error) { error ("dependent-name %qE is parsed as a non-type, but " "instantiation yields a type", qualified_id); inform (input_location, "say %<typename %E%> if a type is meant", qualified_id); } return error_mark_node; } } if (DECL_P (expr)) { if (!check_accessibility_of_qualified_id (expr, /*object_type=*/NULL_TREE, scope, complain)) return error_mark_node; /* Remember that there was a reference to this entity. */ if (!mark_used (expr, complain) && !(complain & tf_error)) return error_mark_node; } if (expr == error_mark_node || TREE_CODE (expr) == TREE_LIST) { if (complain & tf_error) qualified_name_lookup_error (scope, TREE_OPERAND (qualified_id, 1), expr, input_location); return error_mark_node; } if (is_template) { /* We may be repeating a check already done during parsing, but if it was well-formed and passed then, it will pass again now, and if it didn't, we wouldn't have got here. The case we want to catch is when we couldn't tell then, and can now, namely when templ prior to substitution was an identifier. */ if (flag_concepts && check_auto_in_tmpl_args (expr, template_args)) return error_mark_node; if (variable_template_p (expr)) expr = lookup_and_finish_template_variable (expr, template_args, complain); else expr = lookup_template_function (expr, template_args); } if (expr == error_mark_node && complain & tf_error) qualified_name_lookup_error (scope, TREE_OPERAND (qualified_id, 1), expr, input_location); else if (TYPE_P (scope)) { expr = (adjust_result_of_qualified_name_lookup (expr, scope, current_nonlambda_class_type ())); expr = (finish_qualified_id_expr (scope, expr, done, address_p && PTRMEM_OK_P (qualified_id), QUALIFIED_NAME_IS_TEMPLATE (qualified_id), /*template_arg_p=*/false, complain)); } /* Expressions do not generally have reference type. 
*/
  if (TREE_CODE (expr) != SCOPE_REF
      /* However, if we're about to form a pointer-to-member, we just
	 want the referenced member referenced.  */
      && TREE_CODE (expr) != OFFSET_REF)
    expr = convert_from_reference (expr);

  if (REF_PARENTHESIZED_P (qualified_id))
    expr = force_paren_expr (expr);

  return expr;
}

/* tsubst the initializer for a VAR_DECL.  INIT is the unsubstituted
   initializer, DECL is the substituted VAR_DECL.  Other arguments are as
   for tsubst.  Returns NULL_TREE if INIT is NULL_TREE or if substitution
   yields no initializer for a type that needs none; may return
   error_mark_node.  */

static tree
tsubst_init (tree init, tree decl, tree args, tsubst_flags_t complain,
	     tree in_decl)
{
  if (!init)
    return NULL_TREE;

  /* The trailing FALSE is integral_constant_expression_p.  */
  init = tsubst_expr (init, args, complain, in_decl, false);

  tree type = TREE_TYPE (decl);

  /* Substitution can consume the initializer entirely (an empty pack
     expansion); decide what an empty initializer means for TYPE.  */
  if (!init && type != error_mark_node)
    {
      if (tree auto_node = type_uses_auto (type))
	{
	  /* A plain 'auto' (not a class template placeholder) cannot be
	     deduced from an empty initializer list.  */
	  if (!CLASS_PLACEHOLDER_TEMPLATE (auto_node))
	    {
	      if (complain & tf_error)
		error ("initializer for %q#D expands to an empty list "
		       "of expressions", decl);
	      return error_mark_node;
	    }
	}
      else if (!dependent_type_p (type))
	{
	  /* If we had an initializer but it
	     instantiated to nothing,
	     value-initialize the object.  This will
	     only occur when the initializer was a
	     pack expansion where the parameter packs
	     used in that expansion were of length
	     zero.  */
	  init = build_value_init (type, complain);
	  if (TREE_CODE (init) == AGGR_INIT_EXPR)
	    init = get_target_expr_sfinae (init, complain);
	  if (TREE_CODE (init) == TARGET_EXPR)
	    /* Flag direct-initialization so cp_finish_decl doesn't treat
	       this as a copy.  */
	    TARGET_EXPR_DIRECT_INIT_P (init) = true;
	}
    }

  return init;
}

/* If T is a reference to a dependent member of the current instantiation C
   and we are trying to refer to that member in a partial instantiation of
   C, return a SCOPE_REF; otherwise, return NULL_TREE.

   This can happen when forming a C++17 deduction guide, as in PR96199.
*/ static tree maybe_dependent_member_ref (tree t, tree args, tsubst_flags_t complain, tree in_decl) { if (cxx_dialect < cxx17) return NULL_TREE; tree ctx = context_for_name_lookup (t); if (!CLASS_TYPE_P (ctx)) return NULL_TREE; ctx = tsubst (ctx, args, complain, in_decl); if (dependent_scope_p (ctx)) return build_qualified_name (NULL_TREE, ctx, DECL_NAME (t), /*template_p=*/false); return NULL_TREE; } /* Like tsubst, but deals with expressions. This function just replaces template parms; to finish processing the resultant expression, use tsubst_copy_and_build or tsubst_expr. */ static tree tsubst_copy (tree t, tree args, tsubst_flags_t complain, tree in_decl) { enum tree_code code; tree r; if (t == NULL_TREE || t == error_mark_node || args == NULL_TREE) return t; code = TREE_CODE (t); switch (code) { case PARM_DECL: r = retrieve_local_specialization (t); if (r == NULL_TREE) { /* We get here for a use of 'this' in an NSDMI. */ if (DECL_NAME (t) == this_identifier && current_class_ptr) return current_class_ptr; /* This can happen for a parameter name used later in a function declaration (such as in a late-specified return type). Just make a dummy decl, since it's only used for its type. */ gcc_assert (cp_unevaluated_operand != 0); r = tsubst_decl (t, args, complain); /* Give it the template pattern as its context; its true context hasn't been instantiated yet and this is good enough for mangling. */ DECL_CONTEXT (r) = DECL_CONTEXT (t); } if (TREE_CODE (r) == ARGUMENT_PACK_SELECT) r = argument_pack_select_arg (r); if (!mark_used (r, complain) && !(complain & tf_error)) return error_mark_node; return r; case CONST_DECL: { tree enum_type; tree v; if (DECL_TEMPLATE_PARM_P (t)) return tsubst_copy (DECL_INITIAL (t), args, complain, in_decl); /* There is no need to substitute into namespace-scope enumerators. */ if (DECL_NAMESPACE_SCOPE_P (t)) return t; /* If ARGS is NULL, then T is known to be non-dependent. 
*/ if (args == NULL_TREE) return scalar_constant_value (t); if (tree ref = maybe_dependent_member_ref (t, args, complain, in_decl)) return ref; /* Unfortunately, we cannot just call lookup_name here. Consider: template <int I> int f() { enum E { a = I }; struct S { void g() { E e = a; } }; }; When we instantiate f<7>::S::g(), say, lookup_name is not clever enough to find f<7>::a. */ enum_type = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, /*entering_scope=*/0); for (v = TYPE_VALUES (enum_type); v != NULL_TREE; v = TREE_CHAIN (v)) if (TREE_PURPOSE (v) == DECL_NAME (t)) return TREE_VALUE (v); /* We didn't find the name. That should never happen; if name-lookup found it during preliminary parsing, we should find it again here during instantiation. */ gcc_unreachable (); } return t; case FIELD_DECL: if (DECL_CONTEXT (t)) { tree ctx; ctx = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, /*entering_scope=*/1); if (ctx != DECL_CONTEXT (t)) { tree r = lookup_field (ctx, DECL_NAME (t), 0, false); if (!r) { if (complain & tf_error) error ("using invalid field %qD", t); return error_mark_node; } return r; } } return t; case VAR_DECL: if (tree ref = maybe_dependent_member_ref (t, args, complain, in_decl)) return ref; gcc_fallthrough(); case FUNCTION_DECL: if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t)) r = tsubst (t, args, complain, in_decl); else if (DECL_LOCAL_DECL_P (t)) { /* Local specialization will have been created when we instantiated the DECL_EXPR_DECL. */ r = retrieve_local_specialization (t); if (!r) r = error_mark_node; } else if (local_variable_p (t) && uses_template_parms (DECL_CONTEXT (t))) { r = retrieve_local_specialization (t); if (r == NULL_TREE) { /* First try name lookup to find the instantiation. */ r = lookup_name (DECL_NAME (t)); if (r) { if (!VAR_P (r)) { /* During error-recovery we may find a non-variable, even an OVERLOAD: just bail out and avoid ICEs and duplicate diagnostics (c++/62207). 
*/ gcc_assert (seen_error ()); return error_mark_node; } if (!is_capture_proxy (r)) { /* Make sure the one we found is the one we want. */ tree ctx = enclosing_instantiation_of (DECL_CONTEXT (t)); if (ctx != DECL_CONTEXT (r)) r = NULL_TREE; } } if (r) /* OK */; else { /* This can happen for a variable used in a late-specified return type of a local lambda, or for a local static or constant. Building a new VAR_DECL should be OK in all those cases. */ r = tsubst_decl (t, args, complain); if (local_specializations) /* Avoid infinite recursion (79640). */ register_local_specialization (r, t); if (decl_maybe_constant_var_p (r)) { /* We can't call cp_finish_decl, so handle the initializer by hand. */ tree init = tsubst_init (DECL_INITIAL (t), r, args, complain, in_decl); if (!processing_template_decl) init = maybe_constant_init (init); if (processing_template_decl ? potential_constant_expression (init) : reduced_constant_expression_p (init)) DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) = TREE_CONSTANT (r) = true; DECL_INITIAL (r) = init; if (tree auto_node = type_uses_auto (TREE_TYPE (r))) TREE_TYPE (r) = do_auto_deduction (TREE_TYPE (r), init, auto_node, complain, adc_variable_type); } gcc_assert (cp_unevaluated_operand || TREE_STATIC (r) || decl_constant_var_p (r) || seen_error ()); if (!processing_template_decl && !TREE_STATIC (r)) r = process_outer_var_ref (r, complain); } /* Remember this for subsequent uses. 
*/ if (local_specializations) register_local_specialization (r, t); } if (TREE_CODE (r) == ARGUMENT_PACK_SELECT) r = argument_pack_select_arg (r); } else r = t; if (!mark_used (r, complain)) return error_mark_node; return r; case NAMESPACE_DECL: return t; case OVERLOAD: return t; case BASELINK: return tsubst_baselink (t, current_nonlambda_class_type (), args, complain, in_decl); case TEMPLATE_DECL: if (DECL_TEMPLATE_TEMPLATE_PARM_P (t)) return tsubst (TREE_TYPE (DECL_TEMPLATE_RESULT (t)), args, complain, in_decl); else if (DECL_FUNCTION_TEMPLATE_P (t) && DECL_MEMBER_TEMPLATE_P (t)) return tsubst (t, args, complain, in_decl); else if (DECL_CLASS_SCOPE_P (t) && uses_template_parms (DECL_CONTEXT (t))) { /* Template template argument like the following example need special treatment: template <template <class> class TT> struct C {}; template <class T> struct D { template <class U> struct E {}; C<E> c; // #1 }; D<int> d; // #2 We are processing the template argument `E' in #1 for the template instantiation #2. Originally, `E' is a TEMPLATE_DECL with `D<T>' as its DECL_CONTEXT. Now we have to substitute this with one having context `D<int>'. */ tree context = tsubst (DECL_CONTEXT (t), args, complain, in_decl); if (dependent_scope_p (context)) { /* When rewriting a constructor into a deduction guide, a non-dependent name can become dependent, so memtmpl<args> becomes context::template memtmpl<args>. */ tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); return build_qualified_name (type, context, DECL_NAME (t), /*template*/true); } return lookup_field (context, DECL_NAME(t), 0, false); } else /* Ordinary template template argument. */ return t; case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: { /* Handle location wrappers by substituting the wrapped node first, *then* reusing the resulting type. Doing the type first ensures that we handle template parameters and parameter pack expansions. 
*/ if (location_wrapper_p (t)) { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); return maybe_wrap_with_location (op0, EXPR_LOCATION (t)); } tree op = TREE_OPERAND (t, 0); if (code == VIEW_CONVERT_EXPR && TREE_CODE (op) == TEMPLATE_PARM_INDEX) { /* Wrapper to make a C++20 template parameter object const. */ op = tsubst_copy (op, args, complain, in_decl); if (TREE_CODE (op) == TEMPLATE_PARM_INDEX) { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); return build1 (code, type, op); } else { gcc_assert (CP_TYPE_CONST_P (TREE_TYPE (op)) || (TREE_CODE (op) == IMPLICIT_CONV_EXPR && IMPLICIT_CONV_EXPR_NONTYPE_ARG (op))); return op; } } /* force_paren_expr can also create a VIEW_CONVERT_EXPR. */ else if (code == VIEW_CONVERT_EXPR && REF_PARENTHESIZED_P (t)) { op = tsubst_copy (op, args, complain, in_decl); op = build1 (code, TREE_TYPE (op), op); REF_PARENTHESIZED_P (op) = true; return op; } /* We shouldn't see any other uses of these in templates. */ gcc_unreachable (); } case CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: case STATIC_CAST_EXPR: case DYNAMIC_CAST_EXPR: case IMPLICIT_CONV_EXPR: case CONVERT_EXPR: case NOP_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); return build1 (code, type, op0); } case SIZEOF_EXPR: if (PACK_EXPANSION_P (TREE_OPERAND (t, 0)) || ARGUMENT_PACK_P (TREE_OPERAND (t, 0))) { tree expanded, op = TREE_OPERAND (t, 0); int len = 0; if (SIZEOF_EXPR_TYPE_P (t)) op = TREE_TYPE (op); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; /* We only want to compute the number of arguments. 
*/ if (PACK_EXPANSION_P (op)) expanded = tsubst_pack_expansion (op, args, complain, in_decl); else expanded = tsubst_template_args (ARGUMENT_PACK_ARGS (op), args, complain, in_decl); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; if (TREE_CODE (expanded) == TREE_VEC) { len = TREE_VEC_LENGTH (expanded); /* Set TREE_USED for the benefit of -Wunused. */ for (int i = 0; i < len; i++) if (DECL_P (TREE_VEC_ELT (expanded, i))) TREE_USED (TREE_VEC_ELT (expanded, i)) = true; } if (expanded == error_mark_node) return error_mark_node; else if (PACK_EXPANSION_P (expanded) || (TREE_CODE (expanded) == TREE_VEC && pack_expansion_args_count (expanded))) { if (PACK_EXPANSION_P (expanded)) /* OK. */; else if (TREE_VEC_LENGTH (expanded) == 1) expanded = TREE_VEC_ELT (expanded, 0); else expanded = make_argument_pack (expanded); if (TYPE_P (expanded)) return cxx_sizeof_or_alignof_type (input_location, expanded, SIZEOF_EXPR, false, complain & tf_error); else return cxx_sizeof_or_alignof_expr (input_location, expanded, SIZEOF_EXPR, complain & tf_error); } else return build_int_cst (size_type_node, len); } if (SIZEOF_EXPR_TYPE_P (t)) { r = tsubst (TREE_TYPE (TREE_OPERAND (t, 0)), args, complain, in_decl); r = build1 (NOP_EXPR, r, error_mark_node); r = build1 (SIZEOF_EXPR, tsubst (TREE_TYPE (t), args, complain, in_decl), r); SIZEOF_EXPR_TYPE_P (r) = 1; return r; } /* Fall through */ case INDIRECT_REF: case NEGATE_EXPR: case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case ADDR_EXPR: case UNARY_PLUS_EXPR: /* Unary + */ case ALIGNOF_EXPR: case AT_ENCODE_EXPR: case ARROW_EXPR: case THROW_EXPR: case TYPEID_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: case PAREN_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); r = build1 (code, type, op0); if (code == ALIGNOF_EXPR) ALIGNOF_EXPR_STD_P (r) = ALIGNOF_EXPR_STD_P (t); return r; } case COMPONENT_REF: { tree object; tree name; object = tsubst_copy 
(TREE_OPERAND (t, 0), args, complain, in_decl); name = TREE_OPERAND (t, 1); if (TREE_CODE (name) == BIT_NOT_EXPR) { name = tsubst_copy (TREE_OPERAND (name, 0), args, complain, in_decl); name = build1 (BIT_NOT_EXPR, NULL_TREE, name); } else if (TREE_CODE (name) == SCOPE_REF && TREE_CODE (TREE_OPERAND (name, 1)) == BIT_NOT_EXPR) { tree base = tsubst_copy (TREE_OPERAND (name, 0), args, complain, in_decl); name = TREE_OPERAND (name, 1); name = tsubst_copy (TREE_OPERAND (name, 0), args, complain, in_decl); name = build1 (BIT_NOT_EXPR, NULL_TREE, name); name = build_qualified_name (/*type=*/NULL_TREE, base, name, /*template_p=*/false); } else if (BASELINK_P (name)) name = tsubst_baselink (name, non_reference (TREE_TYPE (object)), args, complain, in_decl); else name = tsubst_copy (name, args, complain, in_decl); return build_nt (COMPONENT_REF, object, name, NULL_TREE); } case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case RSHIFT_EXPR: case LSHIFT_EXPR: case EQ_EXPR: case NE_EXPR: case MAX_EXPR: case MIN_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: case COMPOUND_EXPR: case DOTSTAR_EXPR: case MEMBER_REF: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); return build_nt (code, op0, op1); } case SCOPE_REF: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); return build_qualified_name (/*type=*/NULL_TREE, op0, op1, QUALIFIED_NAME_IS_TEMPLATE (t)); } case ARRAY_REF: { tree op0 = tsubst_copy 
(TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); return build_nt (ARRAY_REF, op0, op1, NULL_TREE, NULL_TREE); } case CALL_EXPR: { int n = VL_EXP_OPERAND_LENGTH (t); tree result = build_vl_exp (CALL_EXPR, n); int i; for (i = 0; i < n; i++) TREE_OPERAND (t, i) = tsubst_copy (TREE_OPERAND (t, i), args, complain, in_decl); return result; } case COND_EXPR: case MODOP_EXPR: case PSEUDO_DTOR_EXPR: case VEC_PERM_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl); r = build_nt (code, op0, op1, op2); TREE_NO_WARNING (r) = TREE_NO_WARNING (t); return r; } case NEW_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl); r = build_nt (code, op0, op1, op2); NEW_EXPR_USE_GLOBAL (r) = NEW_EXPR_USE_GLOBAL (t); return r; } case DELETE_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); r = build_nt (code, op0, op1); DELETE_EXPR_USE_GLOBAL (r) = DELETE_EXPR_USE_GLOBAL (t); DELETE_EXPR_USE_VEC (r) = DELETE_EXPR_USE_VEC (t); return r; } case TEMPLATE_ID_EXPR: { /* Substituted template arguments */ tree fn = TREE_OPERAND (t, 0); tree targs = TREE_OPERAND (t, 1); fn = tsubst_copy (fn, args, complain, in_decl); if (targs) targs = tsubst_template_args (targs, args, complain, in_decl); return lookup_template_function (fn, targs); } case TREE_LIST: { tree purpose, value, chain; if (t == void_list_node) return t; purpose = TREE_PURPOSE (t); if (purpose) purpose = tsubst_copy (purpose, args, complain, in_decl); value = TREE_VALUE (t); if (value) value = tsubst_copy (value, args, complain, in_decl); 
chain = TREE_CHAIN (t); if (chain && chain != void_type_node) chain = tsubst_copy (chain, args, complain, in_decl); if (purpose == TREE_PURPOSE (t) && value == TREE_VALUE (t) && chain == TREE_CHAIN (t)) return t; return tree_cons (purpose, value, chain); } case RECORD_TYPE: case UNION_TYPE: case ENUMERAL_TYPE: case INTEGER_TYPE: case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: case TEMPLATE_PARM_INDEX: case POINTER_TYPE: case REFERENCE_TYPE: case OFFSET_TYPE: case FUNCTION_TYPE: case METHOD_TYPE: case ARRAY_TYPE: case TYPENAME_TYPE: case UNBOUND_CLASS_TEMPLATE: case TYPEOF_TYPE: case DECLTYPE_TYPE: case TYPE_DECL: return tsubst (t, args, complain, in_decl); case USING_DECL: t = DECL_NAME (t); /* Fall through. */ case IDENTIFIER_NODE: if (IDENTIFIER_CONV_OP_P (t)) { tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl); return make_conv_op_name (new_type); } else return t; case CONSTRUCTOR: /* This is handled by tsubst_copy_and_build. */ gcc_unreachable (); case VA_ARG_EXPR: { tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); return build_x_va_arg (EXPR_LOCATION (t), op0, type); } case CLEANUP_POINT_EXPR: /* We shouldn't have built any of these during initial template generation. Instead, they should be built during instantiation in response to the saved STMT_IS_FULL_EXPR_P setting. 
*/ gcc_unreachable (); case OFFSET_REF: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); r = build2 (code, type, op0, op1); PTRMEM_OK_P (r) = PTRMEM_OK_P (t); if (!mark_used (TREE_OPERAND (r, 1), complain) && !(complain & tf_error)) return error_mark_node; return r; } case EXPR_PACK_EXPANSION: error ("invalid use of pack expansion expression"); return error_mark_node; case NONTYPE_ARGUMENT_PACK: error ("use %<...%> to expand argument pack"); return error_mark_node; case VOID_CST: gcc_checking_assert (t == void_node && VOID_TYPE_P (TREE_TYPE (t))); return t; case INTEGER_CST: case REAL_CST: case COMPLEX_CST: { /* Instantiate any typedefs in the type. */ tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); r = fold_convert (type, t); gcc_assert (TREE_CODE (r) == code); return r; } case STRING_CST: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); r = t; if (type != TREE_TYPE (t)) { r = copy_node (t); TREE_TYPE (r) = type; } return r; } case PTRMEM_CST: /* These can sometimes show up in a partial instantiation, but never involve template parms. */ gcc_assert (!uses_template_parms (t)); return t; case UNARY_LEFT_FOLD_EXPR: return tsubst_unary_left_fold (t, args, complain, in_decl); case UNARY_RIGHT_FOLD_EXPR: return tsubst_unary_right_fold (t, args, complain, in_decl); case BINARY_LEFT_FOLD_EXPR: return tsubst_binary_left_fold (t, args, complain, in_decl); case BINARY_RIGHT_FOLD_EXPR: return tsubst_binary_right_fold (t, args, complain, in_decl); case PREDICT_EXPR: return t; case DEBUG_BEGIN_STMT: /* ??? There's no point in copying it for now, but maybe some day it will contain more information, such as a pointer back to the containing function, inlined copy or so. 
*/ return t; case CO_AWAIT_EXPR: return tsubst_expr (t, args, complain, in_decl, /*integral_constant_expression_p=*/false); break; default: /* We shouldn't get here, but keep going if !flag_checking. */ if (flag_checking) gcc_unreachable (); return t; } } /* Helper function for tsubst_omp_clauses, used for instantiation of OMP_CLAUSE_DECL of clauses. */ static tree tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain, tree in_decl, tree *iterator_cache) { if (decl == NULL_TREE) return NULL_TREE; /* Handle OpenMP iterators. */ if (TREE_CODE (decl) == TREE_LIST && TREE_PURPOSE (decl) && TREE_CODE (TREE_PURPOSE (decl)) == TREE_VEC) { tree ret; if (iterator_cache[0] == TREE_PURPOSE (decl)) ret = iterator_cache[1]; else { tree *tp = &ret; begin_scope (sk_omp, NULL); for (tree it = TREE_PURPOSE (decl); it; it = TREE_CHAIN (it)) { *tp = copy_node (it); TREE_VEC_ELT (*tp, 0) = tsubst_decl (TREE_VEC_ELT (it, 0), args, complain); TREE_VEC_ELT (*tp, 1) = tsubst_expr (TREE_VEC_ELT (it, 1), args, complain, in_decl, /*integral_constant_expression_p=*/false); TREE_VEC_ELT (*tp, 2) = tsubst_expr (TREE_VEC_ELT (it, 2), args, complain, in_decl, /*integral_constant_expression_p=*/false); TREE_VEC_ELT (*tp, 3) = tsubst_expr (TREE_VEC_ELT (it, 3), args, complain, in_decl, /*integral_constant_expression_p=*/false); TREE_CHAIN (*tp) = NULL_TREE; tp = &TREE_CHAIN (*tp); } TREE_VEC_ELT (ret, 5) = poplevel (1, 1, 0); iterator_cache[0] = TREE_PURPOSE (decl); iterator_cache[1] = ret; } return build_tree_list (ret, tsubst_omp_clause_decl (TREE_VALUE (decl), args, complain, in_decl, NULL)); } /* Handle an OpenMP array section represented as a TREE_LIST (or OMP_CLAUSE_DEPEND_KIND). An OMP_CLAUSE_DEPEND (with a depend kind of OMP_CLAUSE_DEPEND_SINK) can also be represented as a TREE_LIST. We can handle it exactly the same as an array section (purpose, value, and a chain), even though the nomenclature (low_bound, length, etc) is different. 
*/ if (TREE_CODE (decl) == TREE_LIST) { tree low_bound = tsubst_expr (TREE_PURPOSE (decl), args, complain, in_decl, /*integral_constant_expression_p=*/false); tree length = tsubst_expr (TREE_VALUE (decl), args, complain, in_decl, /*integral_constant_expression_p=*/false); tree chain = tsubst_omp_clause_decl (TREE_CHAIN (decl), args, complain, in_decl, NULL); if (TREE_PURPOSE (decl) == low_bound && TREE_VALUE (decl) == length && TREE_CHAIN (decl) == chain) return decl; tree ret = tree_cons (low_bound, length, chain); OMP_CLAUSE_DEPEND_SINK_NEGATIVE (ret) = OMP_CLAUSE_DEPEND_SINK_NEGATIVE (decl); return ret; } tree ret = tsubst_expr (decl, args, complain, in_decl, /*integral_constant_expression_p=*/false); /* Undo convert_from_reference tsubst_expr could have called. */ if (decl && REFERENCE_REF_P (ret) && !REFERENCE_REF_P (decl)) ret = TREE_OPERAND (ret, 0); return ret; } /* Like tsubst_copy, but specifically for OpenMP clauses. */ static tree tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort, tree args, tsubst_flags_t complain, tree in_decl) { tree new_clauses = NULL_TREE, nc, oc; tree linear_no_step = NULL_TREE; tree iterator_cache[2] = { NULL_TREE, NULL_TREE }; for (oc = clauses; oc ; oc = OMP_CLAUSE_CHAIN (oc)) { nc = copy_node (oc); OMP_CLAUSE_CHAIN (nc) = new_clauses; new_clauses = nc; switch (OMP_CLAUSE_CODE (nc)) { case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_STMT (oc)) { OMP_CLAUSE_LASTPRIVATE_STMT (nc) = push_stmt_list (); tsubst_expr (OMP_CLAUSE_LASTPRIVATE_STMT (oc), args, complain, in_decl, /*integral_constant_expression_p=*/false); OMP_CLAUSE_LASTPRIVATE_STMT (nc) = pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (nc)); } /* FALLTHRU */ case OMP_CLAUSE_PRIVATE: case OMP_CLAUSE_SHARED: case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_COPYPRIVATE: case OMP_CLAUSE_UNIFORM: case OMP_CLAUSE_DEPEND: case OMP_CLAUSE_FROM: case OMP_CLAUSE_TO: case OMP_CLAUSE_MAP: case OMP_CLAUSE_NONTEMPORAL: case OMP_CLAUSE_USE_DEVICE_PTR: 
case OMP_CLAUSE_USE_DEVICE_ADDR: case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: OMP_CLAUSE_DECL (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, iterator_cache); break; case OMP_CLAUSE_TILE: case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_SCHEDULE: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_FINAL: case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_DIST_SCHEDULE: case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_THREAD_LIMIT: case OMP_CLAUSE_SAFELEN: case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_PRIORITY: case OMP_CLAUSE_ORDERED: case OMP_CLAUSE_HINT: case OMP_CLAUSE_NUM_GANGS: case OMP_CLAUSE_NUM_WORKERS: case OMP_CLAUSE_VECTOR_LENGTH: case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: case OMP_CLAUSE_ASYNC: case OMP_CLAUSE_WAIT: OMP_CLAUSE_OPERAND (nc, 0) = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain, in_decl, /*integral_constant_expression_p=*/false); break; case OMP_CLAUSE_REDUCTION: case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc)) { tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (oc); if (TREE_CODE (placeholder) == SCOPE_REF) { tree scope = tsubst (TREE_OPERAND (placeholder, 0), args, complain, in_decl); OMP_CLAUSE_REDUCTION_PLACEHOLDER (nc) = build_qualified_name (NULL_TREE, scope, TREE_OPERAND (placeholder, 1), false); } else gcc_assert (identifier_p (placeholder)); } OMP_CLAUSE_DECL (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, NULL); break; case OMP_CLAUSE_GANG: case OMP_CLAUSE_ALIGNED: OMP_CLAUSE_DECL (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, NULL); OMP_CLAUSE_OPERAND (nc, 1) = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 1), args, complain, in_decl, /*integral_constant_expression_p=*/false); break; case OMP_CLAUSE_LINEAR: OMP_CLAUSE_DECL (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, 
NULL); if (OMP_CLAUSE_LINEAR_STEP (oc) == NULL_TREE) { gcc_assert (!linear_no_step); linear_no_step = nc; } else if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (oc)) OMP_CLAUSE_LINEAR_STEP (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_LINEAR_STEP (oc), args, complain, in_decl, NULL); else OMP_CLAUSE_LINEAR_STEP (nc) = tsubst_expr (OMP_CLAUSE_LINEAR_STEP (oc), args, complain, in_decl, /*integral_constant_expression_p=*/false); break; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_INBRANCH: case OMP_CLAUSE_NOTINBRANCH: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE_FOR: case OMP_CLAUSE_PARALLEL: case OMP_CLAUSE_SECTIONS: case OMP_CLAUSE_TASKGROUP: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_THREADS: case OMP_CLAUSE_SIMD: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_ORDER: case OMP_CLAUSE_BIND: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_IF_PRESENT: case OMP_CLAUSE_FINALIZE: break; default: gcc_unreachable (); } if ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP) switch (OMP_CLAUSE_CODE (nc)) { case OMP_CLAUSE_SHARED: case OMP_CLAUSE_PRIVATE: case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_LASTPRIVATE: case OMP_CLAUSE_COPYPRIVATE: case OMP_CLAUSE_LINEAR: case OMP_CLAUSE_REDUCTION: case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: case OMP_CLAUSE_USE_DEVICE_PTR: case OMP_CLAUSE_USE_DEVICE_ADDR: case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: /* tsubst_expr on SCOPE_REF results in returning finish_non_static_data_member result. Undo that here. 
*/ if (TREE_CODE (OMP_CLAUSE_DECL (oc)) == SCOPE_REF && (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (oc), 1)) == IDENTIFIER_NODE)) { tree t = OMP_CLAUSE_DECL (nc); tree v = t; while (v) switch (TREE_CODE (v)) { case COMPONENT_REF: case MEM_REF: case INDIRECT_REF: CASE_CONVERT: case POINTER_PLUS_EXPR: v = TREE_OPERAND (v, 0); continue; case PARM_DECL: if (DECL_CONTEXT (v) == current_function_decl && DECL_ARTIFICIAL (v) && DECL_NAME (v) == this_identifier) OMP_CLAUSE_DECL (nc) = TREE_OPERAND (t, 1); /* FALLTHRU */ default: v = NULL_TREE; break; } } else if (VAR_P (OMP_CLAUSE_DECL (oc)) && DECL_HAS_VALUE_EXPR_P (OMP_CLAUSE_DECL (oc)) && DECL_ARTIFICIAL (OMP_CLAUSE_DECL (oc)) && DECL_LANG_SPECIFIC (OMP_CLAUSE_DECL (oc)) && DECL_OMP_PRIVATIZED_MEMBER (OMP_CLAUSE_DECL (oc))) { tree decl = OMP_CLAUSE_DECL (nc); if (VAR_P (decl)) { retrofit_lang_decl (decl); DECL_OMP_PRIVATIZED_MEMBER (decl) = 1; } } break; default: break; } } new_clauses = nreverse (new_clauses); if (ort != C_ORT_OMP_DECLARE_SIMD) { new_clauses = finish_omp_clauses (new_clauses, ort); if (linear_no_step) for (nc = new_clauses; nc; nc = OMP_CLAUSE_CHAIN (nc)) if (nc == linear_no_step) { OMP_CLAUSE_LINEAR_STEP (nc) = NULL_TREE; break; } } return new_clauses; } /* Like tsubst_copy_and_build, but unshare TREE_LIST nodes. 
*/

static tree
tsubst_copy_asm_operands (tree t, tree args, tsubst_flags_t complain,
			  tree in_decl)
{
  /* Empty operand list: nothing to substitute.  */
  if (t == NULL)
    return t;

  /* A non-list node is an operand expression proper: substitute and
     build it like any other expression.  */
  if (TREE_CODE (t) != TREE_LIST)
    return tsubst_copy_and_build (t, args, complain, in_decl,
				  /*function_p=*/false,
				  /*integral_constant_expression_p=*/false);

  if (t == void_list_node)
    return t;

  tree purpose = TREE_PURPOSE (t);
  if (purpose)
    purpose = tsubst_copy_asm_operands (purpose, args, complain, in_decl);

  tree value = TREE_VALUE (t);
  if (value)
    {
      if (TREE_CODE (value) == LABEL_DECL)
	{
	  /* asm goto label: map the pattern's label to the label in the
	     current (instantiated) function and mark it as used.  */
	  value = lookup_label (DECL_NAME (value));
	  gcc_assert (TREE_CODE (value) == LABEL_DECL);
	  TREE_USED (value) = 1;
	}
      else
	value = tsubst_copy_asm_operands (value, args, complain, in_decl);
    }

  tree chain = TREE_CHAIN (t);
  if (chain && chain != void_type_node)
    chain = tsubst_copy_asm_operands (chain, args, complain, in_decl);

  /* Build a fresh TREE_LIST node so the result is unshared.  */
  return tree_cons (purpose, value, chain);
}

/* Used to temporarily communicate the list of #pragma omp parallel
   clauses to #pragma omp for instantiation if they are combined
   together.  */

static tree *omp_parallel_combined_clauses;

static tree tsubst_decomp_names (tree, tree, tree, tsubst_flags_t, tree,
				 tree *, unsigned int *);

/* Substitute one OMP_FOR iterator.
*/

/* Substitute template ARGS into the I'th iterator of OMP_FOR-like stmt T.
   The substituted iteration variable, init, condition and increment are
   stored into DECLV, INITV, CONDV and INCRV; ORIG_DECLV (by reference)
   tracks the pre-conversion decls for range-based for iterators.  CLAUSES
   points at the construct's clause chain, which may gain a data-sharing
   clause for the iteration variable.  Returns true iff a range-for
   iterator forced ORIG_DECLV to be created.  */

static bool
tsubst_omp_for_iterator (tree t, int i, tree declv, tree &orig_declv,
			 tree initv, tree condv, tree incrv,
			 tree *clauses, tree args, tsubst_flags_t complain,
			 tree in_decl, bool integral_constant_expression_p)
{
#define RECUR(NODE)				\
  tsubst_expr ((NODE), args, complain, in_decl,	\
	       integral_constant_expression_p)
  tree decl, init, cond = NULL_TREE, incr = NULL_TREE;
  bool ret = false;

  init = TREE_VEC_ELT (OMP_FOR_INIT (t), i);
  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);

  decl = TREE_OPERAND (init, 0);
  init = TREE_OPERAND (init, 1);
  tree decl_expr = NULL_TREE;
  /* global_namespace in the COND slot appears to be the sentinel the
     parser uses to flag a range-based for iterator -- NOTE(review):
     confirm against the parser.  */
  bool range_for = TREE_VEC_ELT (OMP_FOR_COND (t), i) == global_namespace;
  if (range_for)
    {
      bool decomp = false;
      if (decl != error_mark_node && DECL_HAS_VALUE_EXPR_P (decl))
	{
	  tree v = DECL_VALUE_EXPR (decl);
	  /* A value-expr of the form base[k] with a decomposition base
	     means the range-for declares structured bindings: substitute
	     the artificial base and rebuild each binding's value-expr.  */
	  if (TREE_CODE (v) == ARRAY_REF
	      && VAR_P (TREE_OPERAND (v, 0))
	      && DECL_DECOMPOSITION_P (TREE_OPERAND (v, 0)))
	    {
	      tree decomp_first = NULL_TREE;
	      unsigned decomp_cnt = 0;
	      tree d = tsubst_decl (TREE_OPERAND (v, 0), args, complain);
	      maybe_push_decl (d);
	      d = tsubst_decomp_names (d, TREE_OPERAND (v, 0), args,
				       complain, in_decl,
				       &decomp_first, &decomp_cnt);
	      decomp = true;
	      if (d == error_mark_node)
		decl = error_mark_node;
	      else
		for (unsigned int i = 0; i < decomp_cnt; i++)
		  {
		    if (!DECL_HAS_VALUE_EXPR_P (decomp_first))
		      {
			/* Bindings are chained in reverse, hence the
			   decomp_cnt - i - 1 index.  */
			tree v = build_nt (ARRAY_REF, d,
					   size_int (decomp_cnt - i - 1),
					   NULL_TREE, NULL_TREE);
			SET_DECL_VALUE_EXPR (decomp_first, v);
			DECL_HAS_VALUE_EXPR_P (decomp_first) = 1;
		      }
		    fit_decomposition_lang_decl (decomp_first, d);
		    decomp_first = DECL_CHAIN (decomp_first);
		  }
	    }
	}
      decl = tsubst_decl (decl, args, complain);
      if (!decomp)
	maybe_push_decl (decl);
    }
  else if (init && TREE_CODE (init) == DECL_EXPR)
    {
      /* We need to jump through some hoops to handle declarations in the
	 init-statement, since we might need to handle auto deduction,
	 but we need to keep control of initialization.  */
      decl_expr = init;
      init = DECL_INITIAL (DECL_EXPR_DECL (init));
      decl = tsubst_decl (decl, args, complain);
    }
  else
    {
      if (TREE_CODE (decl) == SCOPE_REF)
	{
	  decl = RECUR (decl);
	  /* tsubst of a SCOPE_REF may yield a COMPONENT_REF off the
	     implicit 'this'; strip wrappers down to the underlying
	     PARM_DECL and privatize the member if so.  */
	  if (TREE_CODE (decl) == COMPONENT_REF)
	    {
	      tree v = decl;
	      while (v)
		switch (TREE_CODE (v))
		  {
		  case COMPONENT_REF:
		  case MEM_REF:
		  case INDIRECT_REF:
		  CASE_CONVERT:
		  case POINTER_PLUS_EXPR:
		    v = TREE_OPERAND (v, 0);
		    continue;
		  case PARM_DECL:
		    if (DECL_CONTEXT (v) == current_function_decl
			&& DECL_ARTIFICIAL (v)
			&& DECL_NAME (v) == this_identifier)
		      {
			decl = TREE_OPERAND (decl, 1);
			decl = omp_privatize_field (decl, false);
		      }
		    /* FALLTHRU */
		  default:
		    v = NULL_TREE;
		    break;
		  }
	    }
	}
      else
	decl = RECUR (decl);
    }
  /* A TREE_VEC init carries a decl in slot 0 (substituted as a decl) and
     two expressions in slots 1 and 2 (substituted as expressions).  */
  if (init && TREE_CODE (init) == TREE_VEC)
    {
      init = copy_node (init);
      TREE_VEC_ELT (init, 0)
	= tsubst_decl (TREE_VEC_ELT (init, 0), args, complain);
      TREE_VEC_ELT (init, 1) = RECUR (TREE_VEC_ELT (init, 1));
      TREE_VEC_ELT (init, 2) = RECUR (TREE_VEC_ELT (init, 2));
    }
  else
    init = RECUR (init);

  if (orig_declv && OMP_FOR_ORIG_DECLS (t))
    {
      tree o = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (t), i);
      if (TREE_CODE (o) == TREE_LIST)
	TREE_VEC_ELT (orig_declv, i)
	  = tree_cons (RECUR (TREE_PURPOSE (o)),
		       RECUR (TREE_VALUE (o)),
		       NULL_TREE);
      else
	TREE_VEC_ELT (orig_declv, i) = RECUR (o);
    }

  if (range_for)
    {
      /* Lower the range-based for into begin/end/iterator form; cond and
	 incr are filled in by the conversion.  */
      tree this_pre_body = NULL_TREE;
      tree orig_init = NULL_TREE;
      tree orig_decl = NULL_TREE;
      cp_convert_omp_range_for (this_pre_body, NULL, decl, orig_decl, init,
				orig_init, cond, incr);
      if (orig_decl)
	{
	  if (orig_declv == NULL_TREE)
	    orig_declv = copy_node (declv);
	  TREE_VEC_ELT (orig_declv, i) = orig_decl;
	  ret = true;
	}
      else if (orig_declv)
	TREE_VEC_ELT (orig_declv, i) = decl;
    }

  tree auto_node = type_uses_auto (TREE_TYPE (decl));
  if (!range_for && auto_node && init)
    TREE_TYPE (decl)
      = do_auto_deduction (TREE_TYPE (decl), init, auto_node, complain);

  gcc_assert (!type_dependent_expression_p (decl));

  if (!CLASS_TYPE_P (TREE_TYPE (decl)) || range_for)
    {
      if (decl_expr)
	{
	  /* Declare the variable, but don't let that initialize it.  */
	  tree init_sav = DECL_INITIAL (DECL_EXPR_DECL (decl_expr));
	  DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = NULL_TREE;
	  RECUR (decl_expr);
	  DECL_INITIAL (DECL_EXPR_DECL (decl_expr)) = init_sav;
	}

      if (!range_for)
	{
	  cond = TREE_VEC_ELT (OMP_FOR_COND (t), i);
	  if (COMPARISON_CLASS_P (cond)
	      && TREE_CODE (TREE_OPERAND (cond, 1)) == TREE_VEC)
	    {
	      tree lhs = RECUR (TREE_OPERAND (cond, 0));
	      tree rhs = copy_node (TREE_OPERAND (cond, 1));
	      TREE_VEC_ELT (rhs, 0)
		= tsubst_decl (TREE_VEC_ELT (rhs, 0), args, complain);
	      TREE_VEC_ELT (rhs, 1) = RECUR (TREE_VEC_ELT (rhs, 1));
	      TREE_VEC_ELT (rhs, 2) = RECUR (TREE_VEC_ELT (rhs, 2));
	      cond = build2 (TREE_CODE (cond), TREE_TYPE (cond),
			     lhs, rhs);
	    }
	  else
	    cond = RECUR (cond);
	  incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
	  if (TREE_CODE (incr) == MODIFY_EXPR)
	    {
	      tree lhs = RECUR (TREE_OPERAND (incr, 0));
	      tree rhs = RECUR (TREE_OPERAND (incr, 1));
	      incr = build_x_modify_expr (EXPR_LOCATION (incr), lhs,
					  NOP_EXPR, rhs, complain);
	    }
	  else
	    incr = RECUR (incr);
	  if (orig_declv && !OMP_FOR_ORIG_DECLS (t))
	    TREE_VEC_ELT (orig_declv, i) = decl;
	}
      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;
      return ret;
    }

  if (decl_expr)
    {
      /* Declare and initialize the variable.  */
      RECUR (decl_expr);
      init = NULL_TREE;
    }
  else if (init)
    {
      /* Look for an existing data-sharing clause naming the iteration
	 variable: first (j == 0) among the combined parallel's clauses if
	 any, then (j == 1) among this construct's own clauses.  */
      tree *pc;
      int j;
      for (j = ((omp_parallel_combined_clauses == NULL
		|| TREE_CODE (t) == OMP_LOOP) ? 1 : 0); j < 2; j++)
	{
	  for (pc = j ? clauses : omp_parallel_combined_clauses; *pc; )
	    {
	      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_PRIVATE
		  && OMP_CLAUSE_DECL (*pc) == decl)
		break;
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LASTPRIVATE
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  if (j)
		    break;
		  /* Move lastprivate (decl) clause to OMP_FOR_CLAUSES.  */
		  tree c = *pc;
		  *pc = OMP_CLAUSE_CHAIN (c);
		  OMP_CLAUSE_CHAIN (c) = *clauses;
		  *clauses = c;
		}
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_FIRSTPRIVATE
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  error ("iteration variable %qD should not be firstprivate",
			 decl);
		  *pc = OMP_CLAUSE_CHAIN (*pc);
		}
	      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_REDUCTION
		       && OMP_CLAUSE_DECL (*pc) == decl)
		{
		  error ("iteration variable %qD should not be reduction",
			 decl);
		  *pc = OMP_CLAUSE_CHAIN (*pc);
		}
	      else
		pc = &OMP_CLAUSE_CHAIN (*pc);
	    }
	  if (*pc)
	    break;
	}
      /* No explicit clause found: add one making the iteration variable
	 private (lastprivate for OMP_LOOP).  */
      if (*pc == NULL_TREE)
	{
	  tree c = build_omp_clause (input_location,
				     TREE_CODE (t) == OMP_LOOP
				     ? OMP_CLAUSE_LASTPRIVATE
				     : OMP_CLAUSE_PRIVATE);
	  OMP_CLAUSE_DECL (c) = decl;
	  c = finish_omp_clauses (c, C_ORT_OMP);
	  if (c)
	    {
	      OMP_CLAUSE_CHAIN (c) = *clauses;
	      *clauses = c;
	    }
	}
    }
  cond = TREE_VEC_ELT (OMP_FOR_COND (t), i);
  if (COMPARISON_CLASS_P (cond))
    {
      tree op0 = RECUR (TREE_OPERAND (cond, 0));
      tree op1 = RECUR (TREE_OPERAND (cond, 1));
      cond = build2 (TREE_CODE (cond), boolean_type_node, op0, op1);
    }
  else
    cond = RECUR (cond);
  incr = TREE_VEC_ELT (OMP_FOR_INCR (t), i);
  /* Rebuild the increment structurally, giving the rebuilt nodes the
     iteration variable's type.  */
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      incr = build2 (TREE_CODE (incr), TREE_TYPE (decl),
		     RECUR (TREE_OPERAND (incr, 0)), NULL_TREE);
      break;
    case MODIFY_EXPR:
      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	  || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  tree rhs = TREE_OPERAND (incr, 1);
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs0 = RECUR (TREE_OPERAND (rhs, 0));
	  tree rhs1 = RECUR (TREE_OPERAND (rhs, 1));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (rhs), TREE_TYPE (decl),
				 rhs0, rhs1));
	}
      else
	incr = RECUR (incr);
      break;
    case MODOP_EXPR:
      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
	  || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
	{
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (TREE_OPERAND (incr, 1)),
				 TREE_TYPE (decl), lhs,
				 RECUR (TREE_OPERAND (incr, 2))));
	}
      else if (TREE_CODE (TREE_OPERAND (incr, 1)) == NOP_EXPR
	       && (TREE_CODE (TREE_OPERAND (incr, 2)) == PLUS_EXPR
		   || (TREE_CODE (TREE_OPERAND (incr, 2))
		       == MINUS_EXPR)))
	{
	  tree rhs = TREE_OPERAND (incr, 2);
	  tree lhs = RECUR (TREE_OPERAND (incr, 0));
	  tree rhs0 = RECUR (TREE_OPERAND (rhs, 0));
	  tree rhs1 = RECUR (TREE_OPERAND (rhs, 1));
	  incr = build2 (MODIFY_EXPR, TREE_TYPE (decl), lhs,
			 build2 (TREE_CODE (rhs), TREE_TYPE (decl),
				 rhs0, rhs1));
	}
      else
	incr = RECUR (incr);
      break;
    default:
      incr = RECUR (incr);
      break;
    }

  if (orig_declv && !OMP_FOR_ORIG_DECLS (t))
    TREE_VEC_ELT (orig_declv, i) = decl;
  TREE_VEC_ELT (declv, i) = decl;
  TREE_VEC_ELT (initv, i) = init;
  TREE_VEC_ELT (condv, i) = cond;
  TREE_VEC_ELT (incrv, i) = incr;
  return false;
#undef RECUR
}

/* Helper function of tsubst_expr, find OMP_TEAMS inside
   of OMP_TARGET's body.  Used with cp_walk_tree; walk_subtrees is
   cleared by default, so the walk only descends through BIND_EXPRs
   and STATEMENT_LISTs.  */

static tree
tsubst_find_omp_teams (tree *tp, int *walk_subtrees, void *)
{
  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_TEAMS:
      return *tp;
    case BIND_EXPR:
    case STATEMENT_LIST:
      *walk_subtrees = 1;
      break;
    default:
      break;
    }
  return NULL_TREE;
}

/* Helper function for tsubst_expr.  For decomposition declaration
   artificial base DECL, which is tsubsted PATTERN_DECL, tsubst
   also the corresponding decls representing the identifiers
   of the decomposition declaration.  Return DECL if successful
   or error_mark_node otherwise, set *FIRST to the first decl
   in the list chained through DECL_CHAIN and *CNT to the number
   of such decls.
*/

static tree
tsubst_decomp_names (tree decl, tree pattern_decl, tree args,
		     tsubst_flags_t complain, tree in_decl, tree *first,
		     unsigned int *cnt)
{
  /* PREV tracks the most recently substituted decl so the DECL_CHAIN
     ordering of the new decls can be verified below.  */
  tree decl2, decl3, prev = decl;
  *cnt = 0;
  /* The artificial decomposition base is unnamed.  */
  gcc_assert (DECL_NAME (decl) == NULL_TREE);
  /* Walk the named decomposition decls chained after the pattern base.  */
  for (decl2 = DECL_CHAIN (pattern_decl);
       decl2
       && VAR_P (decl2)
       && DECL_DECOMPOSITION_P (decl2)
       && DECL_NAME (decl2);
       decl2 = DECL_CHAIN (decl2))
    {
      if (TREE_TYPE (decl2) == error_mark_node && *cnt == 0)
	{
	  /* Only reachable after a diagnosed error.  */
	  gcc_assert (errorcount);
	  return error_mark_node;
	}
      (*cnt)++;
      gcc_assert (DECL_DECOMP_BASE (decl2) == pattern_decl);
      gcc_assert (DECL_HAS_VALUE_EXPR_P (decl2));
      /* Temporarily hide the pattern's value-expr so tsubst substitutes
	 the decl itself rather than its expansion; restore afterwards.  */
      tree v = DECL_VALUE_EXPR (decl2);
      DECL_HAS_VALUE_EXPR_P (decl2) = 0;
      SET_DECL_VALUE_EXPR (decl2, NULL_TREE);
      decl3 = tsubst (decl2, args, complain, in_decl);
      SET_DECL_VALUE_EXPR (decl2, v);
      DECL_HAS_VALUE_EXPR_P (decl2) = 1;
      if (VAR_P (decl3))
	/* Mark it so instantiate_decl won't try to instantiate it.  */
	DECL_TEMPLATE_INSTANTIATED (decl3) = 1;
      else
	{
	  gcc_assert (errorcount);
	  decl = error_mark_node;
	  continue;
	}
      maybe_push_decl (decl3);
      if (error_operand_p (decl3))
	decl = error_mark_node;
      else if (decl != error_mark_node
	       && DECL_CHAIN (decl3) != prev
	       && decl != prev)
	{
	  /* The new decls must chain back-to-back; anything else means
	     an error was already reported.  */
	  gcc_assert (errorcount);
	  decl = error_mark_node;
	}
      else
	prev = decl3;
    }
  /* PREV now points at the first (most recently chained) decl.  */
  *first = prev;
  return decl;
}

/* Return the proper local_specialization for init-capture pack DECL.  */

static tree
lookup_init_capture_pack (tree decl)
{
  /* We handle normal pack captures by forwarding to the specialization of
     the captured parameter.  We can't do that for pack init-captures; we
     need them to have their own local_specialization.  We created the
     individual VAR_DECLs (if any) under build_capture_proxy, and we need
     to collect them when we process the DECL_EXPR for the pack
     init-capture in the template.  So, how do we find them?  We don't
     know the capture proxy pack when building the individual resulting
     proxies, and we don't know the individual proxies when instantiating
     the pack.  What we have in common is the FIELD_DECL.
So...when we instantiate the FIELD_DECL, we stick the result in local_specializations. Then at the DECL_EXPR we look up that result, see how many elements it has, synthesize the names, and look them up. */ tree cname = DECL_NAME (decl); tree val = DECL_VALUE_EXPR (decl); tree field = TREE_OPERAND (val, 1); gcc_assert (TREE_CODE (field) == FIELD_DECL); tree fpack = retrieve_local_specialization (field); if (fpack == error_mark_node) return error_mark_node; int len = 1; tree vec = NULL_TREE; tree r = NULL_TREE; if (TREE_CODE (fpack) == TREE_VEC) { len = TREE_VEC_LENGTH (fpack); vec = make_tree_vec (len); r = make_node (NONTYPE_ARGUMENT_PACK); SET_ARGUMENT_PACK_ARGS (r, vec); } for (int i = 0; i < len; ++i) { tree ename = vec ? make_ith_pack_parameter_name (cname, i) : cname; tree elt = lookup_name (ename); if (vec) TREE_VEC_ELT (vec, i) = elt; else r = elt; } return r; } /* Like tsubst_copy for expressions, etc. but also does semantic processing. */ tree tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl, bool integral_constant_expression_p) { #define RETURN(EXP) do { r = (EXP); goto out; } while(0) #define RECUR(NODE) \ tsubst_expr ((NODE), args, complain, in_decl, \ integral_constant_expression_p) tree stmt, tmp; tree r; location_t loc; if (t == NULL_TREE || t == error_mark_node) return t; loc = input_location; if (location_t eloc = cp_expr_location (t)) input_location = eloc; if (STATEMENT_CODE_P (TREE_CODE (t))) current_stmt_tree ()->stmts_are_full_exprs_p = STMT_IS_FULL_EXPR_P (t); switch (TREE_CODE (t)) { case STATEMENT_LIST: { tree_stmt_iterator i; for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i)) RECUR (tsi_stmt (i)); break; } case CTOR_INITIALIZER: finish_mem_initializers (tsubst_initializer_list (TREE_OPERAND (t, 0), args)); break; case RETURN_EXPR: finish_return_stmt (RECUR (TREE_OPERAND (t, 0))); break; case CO_RETURN_EXPR: finish_co_return_stmt (input_location, RECUR (TREE_OPERAND (t, 0))); break; case CO_YIELD_EXPR: stmt = 
finish_co_yield_expr (input_location, RECUR (TREE_OPERAND (t, 0))); RETURN (stmt); break; case CO_AWAIT_EXPR: stmt = finish_co_await_expr (input_location, RECUR (TREE_OPERAND (t, 0))); RETURN (stmt); break; case EXPR_STMT: tmp = RECUR (EXPR_STMT_EXPR (t)); if (EXPR_STMT_STMT_EXPR_RESULT (t)) finish_stmt_expr_expr (tmp, cur_stmt_expr); else finish_expr_stmt (tmp); break; case USING_STMT: finish_using_directive (USING_STMT_NAMESPACE (t), /*attribs=*/NULL_TREE); break; case DECL_EXPR: { tree decl, pattern_decl; tree init; pattern_decl = decl = DECL_EXPR_DECL (t); if (TREE_CODE (decl) == LABEL_DECL) finish_label_decl (DECL_NAME (decl)); else if (TREE_CODE (decl) == USING_DECL) { tree scope = USING_DECL_SCOPE (decl); tree name = DECL_NAME (decl); scope = tsubst (scope, args, complain, in_decl); finish_nonmember_using_decl (scope, name); } else if (is_capture_proxy (decl) && !DECL_TEMPLATE_INSTANTIATION (current_function_decl)) { /* We're in tsubst_lambda_expr, we've already inserted a new capture proxy, so look it up and register it. */ tree inst; if (!DECL_PACK_P (decl)) { inst = lookup_name (DECL_NAME (decl), LOOK_where::BLOCK, LOOK_want::HIDDEN_LAMBDA); gcc_assert (inst != decl && is_capture_proxy (inst)); } else if (is_normal_capture_proxy (decl)) { inst = (retrieve_local_specialization (DECL_CAPTURED_VARIABLE (decl))); gcc_assert (TREE_CODE (inst) == NONTYPE_ARGUMENT_PACK || DECL_PACK_P (inst)); } else inst = lookup_init_capture_pack (decl); register_local_specialization (inst, decl); break; } else if (DECL_PRETTY_FUNCTION_P (decl)) decl = make_fname_decl (DECL_SOURCE_LOCATION (decl), DECL_NAME (decl), true/*DECL_PRETTY_FUNCTION_P (decl)*/); else if (DECL_IMPLICIT_TYPEDEF_P (decl) && LAMBDA_TYPE_P (TREE_TYPE (decl))) /* Don't copy the old closure; we'll create a new one in tsubst_lambda_expr. 
*/ break; else { init = DECL_INITIAL (decl); decl = tsubst (decl, args, complain, in_decl); if (decl != error_mark_node) { /* By marking the declaration as instantiated, we avoid trying to instantiate it. Since instantiate_decl can't handle local variables, and since we've already done all that needs to be done, that's the right thing to do. */ if (VAR_P (decl)) DECL_TEMPLATE_INSTANTIATED (decl) = 1; if (VAR_P (decl) && !DECL_NAME (decl) && ANON_AGGR_TYPE_P (TREE_TYPE (decl))) /* Anonymous aggregates are a special case. */ finish_anon_union (decl); else if (is_capture_proxy (DECL_EXPR_DECL (t))) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl) == this_identifier) { tree lam = DECL_CONTEXT (current_function_decl); lam = CLASSTYPE_LAMBDA_EXPR (lam); LAMBDA_EXPR_THIS_CAPTURE (lam) = decl; } insert_capture_proxy (decl); } else if (DECL_IMPLICIT_TYPEDEF_P (t)) /* We already did a pushtag. */; else if (VAR_OR_FUNCTION_DECL_P (decl) && DECL_LOCAL_DECL_P (decl)) { if (TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL) DECL_CONTEXT (decl) = NULL_TREE; decl = pushdecl (decl); if (TREE_CODE (decl) == FUNCTION_DECL && DECL_OMP_DECLARE_REDUCTION_P (decl) && cp_check_omp_declare_reduction (decl)) instantiate_body (pattern_decl, args, decl, true); } else { bool const_init = false; unsigned int cnt = 0; tree first = NULL_TREE, ndecl = error_mark_node; maybe_push_decl (decl); if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl) && TREE_TYPE (pattern_decl) != error_mark_node) ndecl = tsubst_decomp_names (decl, pattern_decl, args, complain, in_decl, &first, &cnt); init = tsubst_init (init, decl, args, complain, in_decl); if (VAR_P (decl)) const_init = (DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (pattern_decl)); if (ndecl != error_mark_node) cp_maybe_mangle_decomp (ndecl, first, cnt); /* In a non-template function, VLA type declarations are handled in grokdeclarator; for templates, handle them now. 
*/ predeclare_vla (decl); cp_finish_decl (decl, init, const_init, NULL_TREE, 0); if (ndecl != error_mark_node) cp_finish_decomp (ndecl, first, cnt); } } } break; } case FOR_STMT: stmt = begin_for_stmt (NULL_TREE, NULL_TREE); RECUR (FOR_INIT_STMT (t)); finish_init_stmt (stmt); tmp = RECUR (FOR_COND (t)); finish_for_cond (tmp, stmt, false, 0); tmp = RECUR (FOR_EXPR (t)); finish_for_expr (tmp, stmt); { bool prev = note_iteration_stmt_body_start (); RECUR (FOR_BODY (t)); note_iteration_stmt_body_end (prev); } finish_for_stmt (stmt); break; case RANGE_FOR_STMT: { /* Construct another range_for, if this is not a final substitution (for inside a generic lambda of a template). Otherwise convert to a regular for. */ tree decl, expr; stmt = (processing_template_decl ? begin_range_for_stmt (NULL_TREE, NULL_TREE) : begin_for_stmt (NULL_TREE, NULL_TREE)); RECUR (RANGE_FOR_INIT_STMT (t)); decl = RANGE_FOR_DECL (t); decl = tsubst (decl, args, complain, in_decl); maybe_push_decl (decl); expr = RECUR (RANGE_FOR_EXPR (t)); tree decomp_first = NULL_TREE; unsigned decomp_cnt = 0; if (VAR_P (decl) && DECL_DECOMPOSITION_P (decl)) decl = tsubst_decomp_names (decl, RANGE_FOR_DECL (t), args, complain, in_decl, &decomp_first, &decomp_cnt); if (processing_template_decl) { RANGE_FOR_IVDEP (stmt) = RANGE_FOR_IVDEP (t); RANGE_FOR_UNROLL (stmt) = RANGE_FOR_UNROLL (t); finish_range_for_decl (stmt, decl, expr); if (decomp_first && decl != error_mark_node) cp_finish_decomp (decl, decomp_first, decomp_cnt); } else { unsigned short unroll = (RANGE_FOR_UNROLL (t) ? 
tree_to_uhwi (RANGE_FOR_UNROLL (t)) : 0); stmt = cp_convert_range_for (stmt, decl, expr, decomp_first, decomp_cnt, RANGE_FOR_IVDEP (t), unroll); } bool prev = note_iteration_stmt_body_start (); RECUR (RANGE_FOR_BODY (t)); note_iteration_stmt_body_end (prev); finish_for_stmt (stmt); } break; case WHILE_STMT: stmt = begin_while_stmt (); tmp = RECUR (WHILE_COND (t)); finish_while_stmt_cond (tmp, stmt, false, 0); { bool prev = note_iteration_stmt_body_start (); RECUR (WHILE_BODY (t)); note_iteration_stmt_body_end (prev); } finish_while_stmt (stmt); break; case DO_STMT: stmt = begin_do_stmt (); { bool prev = note_iteration_stmt_body_start (); RECUR (DO_BODY (t)); note_iteration_stmt_body_end (prev); } finish_do_body (stmt); tmp = RECUR (DO_COND (t)); finish_do_stmt (tmp, stmt, false, 0); break; case IF_STMT: stmt = begin_if_stmt (); IF_STMT_CONSTEXPR_P (stmt) = IF_STMT_CONSTEXPR_P (t); if (IF_STMT_CONSTEXPR_P (t)) args = add_extra_args (IF_STMT_EXTRA_ARGS (t), args); tmp = RECUR (IF_COND (t)); tmp = finish_if_stmt_cond (tmp, stmt); if (IF_STMT_CONSTEXPR_P (t) && instantiation_dependent_expression_p (tmp)) { /* We're partially instantiating a generic lambda, but the condition of the constexpr if is still dependent. Don't substitute into the branches now, just remember the template arguments. */ do_poplevel (IF_SCOPE (stmt)); IF_COND (stmt) = IF_COND (t); THEN_CLAUSE (stmt) = THEN_CLAUSE (t); ELSE_CLAUSE (stmt) = ELSE_CLAUSE (t); IF_STMT_EXTRA_ARGS (stmt) = build_extra_args (t, args, complain); add_stmt (stmt); break; } if (IF_STMT_CONSTEXPR_P (t) && integer_zerop (tmp)) /* Don't instantiate the THEN_CLAUSE. */; else { tree folded = fold_non_dependent_expr (tmp, complain); bool inhibit = integer_zerop (folded); if (inhibit) ++c_inhibit_evaluation_warnings; RECUR (THEN_CLAUSE (t)); if (inhibit) --c_inhibit_evaluation_warnings; } finish_then_clause (stmt); if (IF_STMT_CONSTEXPR_P (t) && integer_nonzerop (tmp)) /* Don't instantiate the ELSE_CLAUSE. 
*/; else if (ELSE_CLAUSE (t)) { tree folded = fold_non_dependent_expr (tmp, complain); bool inhibit = integer_nonzerop (folded); begin_else_clause (stmt); if (inhibit) ++c_inhibit_evaluation_warnings; RECUR (ELSE_CLAUSE (t)); if (inhibit) --c_inhibit_evaluation_warnings; finish_else_clause (stmt); } finish_if_stmt (stmt); break; case BIND_EXPR: if (BIND_EXPR_BODY_BLOCK (t)) stmt = begin_function_body (); else stmt = begin_compound_stmt (BIND_EXPR_TRY_BLOCK (t) ? BCS_TRY_BLOCK : 0); RECUR (BIND_EXPR_BODY (t)); if (BIND_EXPR_BODY_BLOCK (t)) finish_function_body (stmt); else finish_compound_stmt (stmt); break; case BREAK_STMT: finish_break_stmt (); break; case CONTINUE_STMT: finish_continue_stmt (); break; case SWITCH_STMT: stmt = begin_switch_stmt (); tmp = RECUR (SWITCH_STMT_COND (t)); finish_switch_cond (tmp, stmt); RECUR (SWITCH_STMT_BODY (t)); finish_switch_stmt (stmt); break; case CASE_LABEL_EXPR: { tree decl = CASE_LABEL (t); tree low = RECUR (CASE_LOW (t)); tree high = RECUR (CASE_HIGH (t)); tree l = finish_case_label (EXPR_LOCATION (t), low, high); if (l && TREE_CODE (l) == CASE_LABEL_EXPR) { tree label = CASE_LABEL (l); FALLTHROUGH_LABEL_P (label) = FALLTHROUGH_LABEL_P (decl); if (DECL_ATTRIBUTES (decl) != NULL_TREE) cplus_decl_attributes (&label, DECL_ATTRIBUTES (decl), 0); } } break; case LABEL_EXPR: { tree decl = LABEL_EXPR_LABEL (t); tree label; label = finish_label_stmt (DECL_NAME (decl)); if (TREE_CODE (label) == LABEL_DECL) FALLTHROUGH_LABEL_P (label) = FALLTHROUGH_LABEL_P (decl); if (DECL_ATTRIBUTES (decl) != NULL_TREE) cplus_decl_attributes (&label, DECL_ATTRIBUTES (decl), 0); } break; case GOTO_EXPR: tmp = GOTO_DESTINATION (t); if (TREE_CODE (tmp) != LABEL_DECL) /* Computed goto's must be tsubst'd into. On the other hand, non-computed gotos must not be; the identifier in question will have no binding. 
*/ tmp = RECUR (tmp); else tmp = DECL_NAME (tmp); finish_goto_stmt (tmp); break; case ASM_EXPR: { tree string = RECUR (ASM_STRING (t)); tree outputs = tsubst_copy_asm_operands (ASM_OUTPUTS (t), args, complain, in_decl); tree inputs = tsubst_copy_asm_operands (ASM_INPUTS (t), args, complain, in_decl); tree clobbers = tsubst_copy_asm_operands (ASM_CLOBBERS (t), args, complain, in_decl); tree labels = tsubst_copy_asm_operands (ASM_LABELS (t), args, complain, in_decl); tmp = finish_asm_stmt (EXPR_LOCATION (t), ASM_VOLATILE_P (t), string, outputs, inputs, clobbers, labels, ASM_INLINE_P (t)); tree asm_expr = tmp; if (TREE_CODE (asm_expr) == CLEANUP_POINT_EXPR) asm_expr = TREE_OPERAND (asm_expr, 0); ASM_INPUT_P (asm_expr) = ASM_INPUT_P (t); } break; case TRY_BLOCK: if (CLEANUP_P (t)) { stmt = begin_try_block (); RECUR (TRY_STMTS (t)); finish_cleanup_try_block (stmt); finish_cleanup (RECUR (TRY_HANDLERS (t)), stmt); } else { tree compound_stmt = NULL_TREE; if (FN_TRY_BLOCK_P (t)) stmt = begin_function_try_block (&compound_stmt); else stmt = begin_try_block (); RECUR (TRY_STMTS (t)); if (FN_TRY_BLOCK_P (t)) finish_function_try_block (stmt); else finish_try_block (stmt); RECUR (TRY_HANDLERS (t)); if (FN_TRY_BLOCK_P (t)) finish_function_handler_sequence (stmt, compound_stmt); else finish_handler_sequence (stmt); } break; case HANDLER: { tree decl = HANDLER_PARMS (t); if (decl) { decl = tsubst (decl, args, complain, in_decl); /* Prevent instantiate_decl from trying to instantiate this variable. We've already done all that needs to be done. */ if (decl != error_mark_node) DECL_TEMPLATE_INSTANTIATED (decl) = 1; } stmt = begin_handler (); finish_handler_parms (decl, stmt); RECUR (HANDLER_BODY (t)); finish_handler (stmt); } break; case TAG_DEFN: tmp = tsubst (TREE_TYPE (t), args, complain, NULL_TREE); if (CLASS_TYPE_P (tmp)) { /* Local classes are not independent templates; they are instantiated along with their containing function. 
And this way we don't have to deal with pushing out of one local class to instantiate a member of another local class. */ /* Closures are handled by the LAMBDA_EXPR. */ gcc_assert (!LAMBDA_TYPE_P (TREE_TYPE (t))); complete_type (tmp); for (tree fld = TYPE_FIELDS (tmp); fld; fld = DECL_CHAIN (fld)) if ((VAR_P (fld) || (TREE_CODE (fld) == FUNCTION_DECL && !DECL_ARTIFICIAL (fld))) && DECL_TEMPLATE_INSTANTIATION (fld)) instantiate_decl (fld, /*defer_ok=*/false, /*expl_inst_class=*/false); } break; case STATIC_ASSERT: { tree condition; ++c_inhibit_evaluation_warnings; condition = tsubst_expr (STATIC_ASSERT_CONDITION (t), args, complain, in_decl, /*integral_constant_expression_p=*/true); --c_inhibit_evaluation_warnings; finish_static_assert (condition, STATIC_ASSERT_MESSAGE (t), STATIC_ASSERT_SOURCE_LOCATION (t), /*member_p=*/false); } break; case OACC_KERNELS: case OACC_PARALLEL: case OACC_SERIAL: tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_ACC, args, complain, in_decl); stmt = begin_omp_parallel (); RECUR (OMP_BODY (t)); finish_omp_construct (TREE_CODE (t), stmt, tmp); break; case OMP_PARALLEL: r = push_omp_privatization_clauses (OMP_PARALLEL_COMBINED (t)); tmp = tsubst_omp_clauses (OMP_PARALLEL_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); if (OMP_PARALLEL_COMBINED (t)) omp_parallel_combined_clauses = &tmp; stmt = begin_omp_parallel (); RECUR (OMP_PARALLEL_BODY (t)); gcc_assert (omp_parallel_combined_clauses == NULL); OMP_PARALLEL_COMBINED (finish_omp_parallel (tmp, stmt)) = OMP_PARALLEL_COMBINED (t); pop_omp_privatization_clauses (r); break; case OMP_TASK: if (OMP_TASK_BODY (t) == NULL_TREE) { tmp = tsubst_omp_clauses (OMP_TASK_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); t = copy_node (t); OMP_TASK_CLAUSES (t) = tmp; add_stmt (t); break; } r = push_omp_privatization_clauses (false); tmp = tsubst_omp_clauses (OMP_TASK_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); stmt = begin_omp_task (); RECUR (OMP_TASK_BODY (t)); finish_omp_task (tmp, stmt); 
pop_omp_privatization_clauses (r); break; case OMP_FOR: case OMP_LOOP: case OMP_SIMD: case OMP_DISTRIBUTE: case OMP_TASKLOOP: case OACC_LOOP: { tree clauses, body, pre_body; tree declv = NULL_TREE, initv = NULL_TREE, condv = NULL_TREE; tree orig_declv = NULL_TREE; tree incrv = NULL_TREE; enum c_omp_region_type ort = C_ORT_OMP; bool any_range_for = false; int i; if (TREE_CODE (t) == OACC_LOOP) ort = C_ORT_ACC; r = push_omp_privatization_clauses (OMP_FOR_INIT (t) == NULL_TREE); clauses = tsubst_omp_clauses (OMP_FOR_CLAUSES (t), ort, args, complain, in_decl); if (OMP_FOR_INIT (t) != NULL_TREE) { declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); if (OMP_FOR_ORIG_DECLS (t)) orig_declv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); initv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); condv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); incrv = make_tree_vec (TREE_VEC_LENGTH (OMP_FOR_INIT (t))); } keep_next_level (true); stmt = begin_omp_structured_block (); pre_body = push_stmt_list (); RECUR (OMP_FOR_PRE_BODY (t)); pre_body = pop_stmt_list (pre_body); if (OMP_FOR_INIT (t) != NULL_TREE) for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++) any_range_for |= tsubst_omp_for_iterator (t, i, declv, orig_declv, initv, condv, incrv, &clauses, args, complain, in_decl, integral_constant_expression_p); omp_parallel_combined_clauses = NULL; if (any_range_for) { gcc_assert (orig_declv); body = begin_omp_structured_block (); for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++) if (TREE_VEC_ELT (orig_declv, i) != TREE_VEC_ELT (declv, i) && TREE_CODE (TREE_VEC_ELT (orig_declv, i)) == TREE_LIST && TREE_CHAIN (TREE_VEC_ELT (orig_declv, i))) cp_finish_omp_range_for (TREE_VEC_ELT (orig_declv, i), TREE_VEC_ELT (declv, i)); } else body = push_stmt_list (); RECUR (OMP_FOR_BODY (t)); if (any_range_for) body = finish_omp_structured_block (body); else body = pop_stmt_list (body); if (OMP_FOR_INIT (t) != NULL_TREE) t = finish_omp_for (EXPR_LOCATION (t), 
TREE_CODE (t), declv, orig_declv, initv, condv, incrv, body, pre_body, NULL, clauses); else { t = make_node (TREE_CODE (t)); TREE_TYPE (t) = void_type_node; OMP_FOR_BODY (t) = body; OMP_FOR_PRE_BODY (t) = pre_body; OMP_FOR_CLAUSES (t) = clauses; SET_EXPR_LOCATION (t, EXPR_LOCATION (t)); add_stmt (t); } add_stmt (finish_omp_for_block (finish_omp_structured_block (stmt), t)); pop_omp_privatization_clauses (r); } break; case OMP_SECTIONS: omp_parallel_combined_clauses = NULL; /* FALLTHRU */ case OMP_SINGLE: case OMP_TEAMS: case OMP_CRITICAL: case OMP_TASKGROUP: case OMP_SCAN: r = push_omp_privatization_clauses (TREE_CODE (t) == OMP_TEAMS && OMP_TEAMS_COMBINED (t)); tmp = tsubst_omp_clauses (OMP_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); if (TREE_CODE (t) == OMP_TEAMS) { keep_next_level (true); stmt = begin_omp_structured_block (); RECUR (OMP_BODY (t)); stmt = finish_omp_structured_block (stmt); } else { stmt = push_stmt_list (); RECUR (OMP_BODY (t)); stmt = pop_stmt_list (stmt); } if (TREE_CODE (t) == OMP_CRITICAL && tmp != NULL_TREE && integer_nonzerop (OMP_CLAUSE_HINT_EXPR (tmp))) { error_at (OMP_CLAUSE_LOCATION (tmp), "%<#pragma omp critical%> with %<hint%> clause requires " "a name, except when %<omp_sync_hint_none%> is used"); RETURN (error_mark_node); } t = copy_node (t); OMP_BODY (t) = stmt; OMP_CLAUSES (t) = tmp; add_stmt (t); pop_omp_privatization_clauses (r); break; case OMP_DEPOBJ: r = RECUR (OMP_DEPOBJ_DEPOBJ (t)); if (OMP_DEPOBJ_CLAUSES (t) && OMP_DEPOBJ_CLAUSES (t) != error_mark_node) { enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_SOURCE; if (TREE_CODE (OMP_DEPOBJ_CLAUSES (t)) == OMP_CLAUSE) { tmp = tsubst_omp_clauses (OMP_DEPOBJ_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); if (tmp == NULL_TREE) tmp = error_mark_node; } else { kind = (enum omp_clause_depend_kind) tree_to_uhwi (OMP_DEPOBJ_CLAUSES (t)); tmp = NULL_TREE; } finish_omp_depobj (EXPR_LOCATION (t), r, kind, tmp); } else finish_omp_depobj (EXPR_LOCATION (t), r, 
OMP_CLAUSE_DEPEND_SOURCE, OMP_DEPOBJ_CLAUSES (t)); break; case OACC_DATA: case OMP_TARGET_DATA: case OMP_TARGET: tmp = tsubst_omp_clauses (OMP_CLAUSES (t), (TREE_CODE (t) == OACC_DATA) ? C_ORT_ACC : C_ORT_OMP, args, complain, in_decl); keep_next_level (true); stmt = begin_omp_structured_block (); RECUR (OMP_BODY (t)); stmt = finish_omp_structured_block (stmt); t = copy_node (t); OMP_BODY (t) = stmt; OMP_CLAUSES (t) = tmp; if (TREE_CODE (t) == OMP_TARGET && OMP_TARGET_COMBINED (t)) { tree teams = cp_walk_tree (&stmt, tsubst_find_omp_teams, NULL, NULL); if (teams) { /* For combined target teams, ensure the num_teams and thread_limit clause expressions are evaluated on the host, before entering the target construct. */ tree c; for (c = OMP_TEAMS_CLAUSES (teams); c; c = OMP_CLAUSE_CHAIN (c)) if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT) && TREE_CODE (OMP_CLAUSE_OPERAND (c, 0)) != INTEGER_CST) { tree expr = OMP_CLAUSE_OPERAND (c, 0); expr = force_target_expr (TREE_TYPE (expr), expr, tf_none); if (expr == error_mark_node) continue; tmp = TARGET_EXPR_SLOT (expr); add_stmt (expr); OMP_CLAUSE_OPERAND (c, 0) = expr; tree tc = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (tc) = tmp; OMP_CLAUSE_CHAIN (tc) = OMP_TARGET_CLAUSES (t); OMP_TARGET_CLAUSES (t) = tc; } } } add_stmt (t); break; case OACC_DECLARE: t = copy_node (t); tmp = tsubst_omp_clauses (OACC_DECLARE_CLAUSES (t), C_ORT_ACC, args, complain, in_decl); OACC_DECLARE_CLAUSES (t) = tmp; add_stmt (t); break; case OMP_TARGET_UPDATE: case OMP_TARGET_ENTER_DATA: case OMP_TARGET_EXIT_DATA: tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); t = copy_node (t); OMP_STANDALONE_CLAUSES (t) = tmp; add_stmt (t); break; case OACC_ENTER_DATA: case OACC_EXIT_DATA: case OACC_UPDATE: tmp = tsubst_omp_clauses (OMP_STANDALONE_CLAUSES (t), C_ORT_ACC, args, complain, in_decl); t = copy_node (t); 
OMP_STANDALONE_CLAUSES (t) = tmp; add_stmt (t); break; case OMP_ORDERED: tmp = tsubst_omp_clauses (OMP_ORDERED_CLAUSES (t), C_ORT_OMP, args, complain, in_decl); stmt = push_stmt_list (); RECUR (OMP_BODY (t)); stmt = pop_stmt_list (stmt); t = copy_node (t); OMP_BODY (t) = stmt; OMP_ORDERED_CLAUSES (t) = tmp; add_stmt (t); break; case OMP_MASTER: omp_parallel_combined_clauses = NULL; /* FALLTHRU */ case OMP_SECTION: stmt = push_stmt_list (); RECUR (OMP_BODY (t)); stmt = pop_stmt_list (stmt); t = copy_node (t); OMP_BODY (t) = stmt; add_stmt (t); break; case OMP_ATOMIC: gcc_assert (OMP_ATOMIC_DEPENDENT_P (t)); tmp = NULL_TREE; if (TREE_CODE (TREE_OPERAND (t, 0)) == OMP_CLAUSE) tmp = tsubst_omp_clauses (TREE_OPERAND (t, 0), C_ORT_OMP, args, complain, in_decl); if (TREE_CODE (TREE_OPERAND (t, 1)) != MODIFY_EXPR) { tree op1 = TREE_OPERAND (t, 1); tree rhs1 = NULL_TREE; tree lhs, rhs; if (TREE_CODE (op1) == COMPOUND_EXPR) { rhs1 = RECUR (TREE_OPERAND (op1, 0)); op1 = TREE_OPERAND (op1, 1); } lhs = RECUR (TREE_OPERAND (op1, 0)); rhs = RECUR (TREE_OPERAND (op1, 1)); finish_omp_atomic (EXPR_LOCATION (t), OMP_ATOMIC, TREE_CODE (op1), lhs, rhs, NULL_TREE, NULL_TREE, rhs1, tmp, OMP_ATOMIC_MEMORY_ORDER (t)); } else { tree op1 = TREE_OPERAND (t, 1); tree v = NULL_TREE, lhs, rhs = NULL_TREE, lhs1 = NULL_TREE; tree rhs1 = NULL_TREE; enum tree_code code = TREE_CODE (TREE_OPERAND (op1, 1)); enum tree_code opcode = NOP_EXPR; if (code == OMP_ATOMIC_READ) { v = RECUR (TREE_OPERAND (op1, 0)); lhs = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0)); } else if (code == OMP_ATOMIC_CAPTURE_OLD || code == OMP_ATOMIC_CAPTURE_NEW) { tree op11 = TREE_OPERAND (TREE_OPERAND (op1, 1), 1); v = RECUR (TREE_OPERAND (op1, 0)); lhs1 = RECUR (TREE_OPERAND (TREE_OPERAND (op1, 1), 0)); if (TREE_CODE (op11) == COMPOUND_EXPR) { rhs1 = RECUR (TREE_OPERAND (op11, 0)); op11 = TREE_OPERAND (op11, 1); } lhs = RECUR (TREE_OPERAND (op11, 0)); rhs = RECUR (TREE_OPERAND (op11, 1)); opcode = TREE_CODE (op11); if (opcode 
== MODIFY_EXPR) opcode = NOP_EXPR; } else { code = OMP_ATOMIC; lhs = RECUR (TREE_OPERAND (op1, 0)); rhs = RECUR (TREE_OPERAND (op1, 1)); } finish_omp_atomic (EXPR_LOCATION (t), code, opcode, lhs, rhs, v, lhs1, rhs1, tmp, OMP_ATOMIC_MEMORY_ORDER (t)); } break; case TRANSACTION_EXPR: { int flags = 0; flags |= (TRANSACTION_EXPR_OUTER (t) ? TM_STMT_ATTR_OUTER : 0); flags |= (TRANSACTION_EXPR_RELAXED (t) ? TM_STMT_ATTR_RELAXED : 0); if (TRANSACTION_EXPR_IS_STMT (t)) { tree body = TRANSACTION_EXPR_BODY (t); tree noex = NULL_TREE; if (TREE_CODE (body) == MUST_NOT_THROW_EXPR) { noex = MUST_NOT_THROW_COND (body); if (noex == NULL_TREE) noex = boolean_true_node; body = TREE_OPERAND (body, 0); } stmt = begin_transaction_stmt (input_location, NULL, flags); RECUR (body); finish_transaction_stmt (stmt, NULL, flags, RECUR (noex)); } else { stmt = build_transaction_expr (EXPR_LOCATION (t), RECUR (TRANSACTION_EXPR_BODY (t)), flags, NULL_TREE); RETURN (stmt); } } break; case MUST_NOT_THROW_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree cond = RECUR (MUST_NOT_THROW_COND (t)); RETURN (build_must_not_throw_expr (op0, cond)); } case EXPR_PACK_EXPANSION: error ("invalid use of pack expansion expression"); RETURN (error_mark_node); case NONTYPE_ARGUMENT_PACK: error ("use %<...%> to expand argument pack"); RETURN (error_mark_node); case COMPOUND_EXPR: tmp = RECUR (TREE_OPERAND (t, 0)); if (tmp == NULL_TREE) /* If the first operand was a statement, we're done with it. 
*/
	RETURN (RECUR (TREE_OPERAND (t, 1)));
      RETURN (build_x_compound_expr (EXPR_LOCATION (t), tmp,
				     RECUR (TREE_OPERAND (t, 1)),
				     complain));

    case ANNOTATE_EXPR:
      tmp = RECUR (TREE_OPERAND (t, 0));
      RETURN (build3_loc (EXPR_LOCATION (t), ANNOTATE_EXPR,
			  TREE_TYPE (tmp), tmp,
			  RECUR (TREE_OPERAND (t, 1)),
			  RECUR (TREE_OPERAND (t, 2))));

    case PREDICT_EXPR:
      RETURN (add_stmt (copy_node (t)));

    default:
      /* Anything that is not a statement code is handed off to the
	 expression substituter.  */
      gcc_assert (!STATEMENT_CODE_P (TREE_CODE (t)));
      RETURN (tsubst_copy_and_build (t, args, complain, in_decl,
				     /*function_p=*/false,
				     integral_constant_expression_p));
    }

  RETURN (NULL_TREE);
 out:
  input_location = loc;
  return r;
#undef RECUR
#undef RETURN
}

/* Instantiate the special body of the artificial DECL_OMP_DECLARE_REDUCTION
   function.  For description of the body see comment above
   cp_parser_omp_declare_reduction_exprs.  */

static void
tsubst_omp_udr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  if (t == NULL_TREE || t == error_mark_node)
    return;

  /* The UDR body is a flat STATEMENT_LIST built by the parser; we must
     already be inside the instantiated artificial function.  */
  gcc_assert (TREE_CODE (t) == STATEMENT_LIST && current_function_decl);

  tree_stmt_iterator tsi;
  int i;
  /* Collect up to 7 statements from the list; the exact layout is the
     parser's (see cp_parser_omp_declare_reduction_exprs).  From the code
     below: stmts[0]/stmts[1] declare the combiner placeholders
     (presumably omp_out/omp_in), stmts[2] is the combiner statement,
     stmts[3]/stmts[4] declare the initializer placeholders
     (presumably omp_priv/omp_orig), stmts[5] is the initializer statement.  */
  tree stmts[7];
  memset (stmts, 0, sizeof stmts);
  for (i = 0, tsi = tsi_start (t); i < 7 && !tsi_end_p (tsi);
       i++, tsi_next (&tsi))
    stmts[i] = tsi_stmt (tsi);
  gcc_assert (tsi_end_p (tsi));

  if (i >= 3)
    {
      gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR
		  && TREE_CODE (stmts[1]) == DECL_EXPR);
      tree omp_out = tsubst (DECL_EXPR_DECL (stmts[0]),
			     args, complain, in_decl);
      tree omp_in = tsubst (DECL_EXPR_DECL (stmts[1]),
			    args, complain, in_decl);
      /* tsubsting a local var_decl leaves DECL_CONTEXT null, as we expect to
	 be pushing it.
*/

static void
prepend_one_capture (tree field, tree init, tree &list,
		     tsubst_flags_t complain)
{
  /* If the capture field was declared with a placeholder type (an
     init-capture whose type was dependent in the template), deduce its
     type now from the substituted initializer.  */
  if (tree auto_node = type_uses_auto (TREE_TYPE (field)))
    {
      tree type = NULL_TREE;
      if (!init)
	{
	  /* An init-capture must have an initializer to deduce from.  */
	  if (complain & tf_error)
	    error ("empty initializer in lambda init-capture");
	  init = error_mark_node;
	}
      else if (TREE_CODE (init) == TREE_LIST)
	/* A parenthesized expression-list initializer; fold it into a
	   single expression for deduction.  */
	init = build_x_compound_expr_from_list (init, ELK_INIT, complain);
      if (!type)
	type = do_auto_deduction (TREE_TYPE (field), init, auto_node,
				  complain);
      TREE_TYPE (field) = type;
      /* Re-apply cv-qualifiers now that the field has its real type.  */
      cp_apply_type_quals_to_decl (cp_type_quals (type), field);
    }
  /* Prepend, so the finished list is in reverse order; the caller
     nreverses it once all captures have been added.  */
  list = tree_cons (field, init, list);
}

/* T is a LAMBDA_EXPR.  Generate a new LAMBDA_EXPR for the current
   instantiation context.  Instantiating a pack expansion containing a lambda
   might result in multiple lambdas all based on the same lambda in the
   template.  */

tree
tsubst_lambda_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl)
{
  tree oldfn = lambda_function (t);
  in_decl = oldfn;

  /* Build the new LAMBDA_EXPR and copy over the template's properties.  */
  tree r = build_lambda_expr ();
  LAMBDA_EXPR_LOCATION (r) = LAMBDA_EXPR_LOCATION (t);
  LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (r)
    = LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (t);
  LAMBDA_EXPR_MUTABLE_P (r) = LAMBDA_EXPR_MUTABLE_P (t);
  LAMBDA_EXPR_INSTANTIATED (r) = true;

  if (LAMBDA_EXPR_EXTRA_SCOPE (t) == NULL_TREE)
    /* A lambda in a default argument outside a class gets no
       LAMBDA_EXPR_EXTRA_SCOPE, as specified by the ABI.  But
       tsubst_default_argument calls start_lambda_scope, so we need to
       specifically ignore it here, and use the global scope.
*/
    record_null_lambda_scope (r);
  else
    record_lambda_scope (r);

  /* A template lambda never has a resolved 'this' capture or pending
     proxies; those are established during instantiation.  */
  gcc_assert (LAMBDA_EXPR_THIS_CAPTURE (t) == NULL_TREE
	      && LAMBDA_EXPR_PENDING_PROXIES (t) == NULL);

  /* Pairs of (pattern field, instantiated field) for init-capture packs,
     recorded below and registered once local_specializations is pushed.  */
  vec<tree,va_gc>* field_packs = NULL;

  /* Substitute into each capture of the template lambda.  */
  for (tree cap = LAMBDA_EXPR_CAPTURE_LIST (t); cap;
       cap = TREE_CHAIN (cap))
    {
      tree ofield = TREE_PURPOSE (cap);
      tree init = TREE_VALUE (cap);
      if (PACK_EXPANSION_P (init))
	init = tsubst_pack_expansion (init, args, complain, in_decl);
      else
	init = tsubst_copy_and_build (init, args, complain, in_decl,
				      /*fn*/false, /*constexpr*/false);

      if (init == error_mark_node)
	return error_mark_node;

      if (init && TREE_CODE (init) == TREE_LIST)
	init = build_x_compound_expr_from_list (init, ELK_INIT, complain);

      if (!processing_template_decl
	  && init && TREE_CODE (init) != TREE_VEC
	  && variably_modified_type_p (TREE_TYPE (init), NULL_TREE))
	{
	  /* For a VLA, simply tsubsting the field type won't work, we need to
	     go through add_capture again.  XXX do we want to do this for all
	     captures?  */
	  /* Skip the "__" prefix of the field name to recover the
	     capture's source name.  */
	  tree name = (get_identifier
		       (IDENTIFIER_POINTER (DECL_NAME (ofield)) + 2));
	  tree ftype = TREE_TYPE (ofield);
	  bool by_ref = (TYPE_REF_P (ftype)
			 || (TREE_CODE (ftype) == DECLTYPE_TYPE
			     && DECLTYPE_FOR_REF_CAPTURE (ftype)));
	  add_capture (r, name, init, by_ref,
		       !DECL_NORMAL_CAPTURE_P (ofield));
	  continue;
	}

      if (PACK_EXPANSION_P (ofield))
	ofield = PACK_EXPANSION_PATTERN (ofield);
      tree field = tsubst_decl (ofield, args, complain);

      if (DECL_PACK_P (ofield) && !DECL_NORMAL_CAPTURE_P (ofield))
	{
	  /* Remember these for when we've pushed local_specializations.
*/
	  vec_safe_push (field_packs, ofield);
	  vec_safe_push (field_packs, field);
	}

      if (field == error_mark_node)
	return error_mark_node;

      if (TREE_CODE (field) == TREE_VEC)
	{
	  /* A capture pack expanded to multiple fields; the initializer
	     vector must line up element-for-element.  */
	  int len = TREE_VEC_LENGTH (field);
	  gcc_assert (TREE_CODE (init) == TREE_VEC
		      && TREE_VEC_LENGTH (init) == len);
	  for (int i = 0; i < len; ++i)
	    prepend_one_capture (TREE_VEC_ELT (field, i),
				 TREE_VEC_ELT (init, i),
				 LAMBDA_EXPR_CAPTURE_LIST (r),
				 complain);
	}
      else
	{
	  prepend_one_capture (field, init, LAMBDA_EXPR_CAPTURE_LIST (r),
			       complain);

	  /* Recognize the 'this' capture by its field name.  */
	  if (id_equal (DECL_NAME (field), "__this"))
	    LAMBDA_EXPR_THIS_CAPTURE (r) = field;
	}
    }

  tree type = begin_lambda_type (r);
  if (type == error_mark_node)
    return error_mark_node;

  /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set.  */
  determine_visibility (TYPE_NAME (type));

  register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (r));

  tree oldtmpl = (generic_lambda_fn_p (oldfn)
		  ? DECL_TI_TEMPLATE (oldfn)
		  : NULL_TREE);

  /* Substitute the call operator's type; for a generic lambda this is a
     partial substitution, so do it with processing_template_decl on.  */
  tree fntype = static_fn_type (oldfn);
  if (oldtmpl)
    ++processing_template_decl;
  fntype = tsubst (fntype, args, complain, in_decl);
  if (oldtmpl)
    --processing_template_decl;

  if (fntype == error_mark_node)
    r = error_mark_node;
  else
    {
      /* The body of a lambda-expression is not a subexpression of the
	 enclosing expression.  Parms are to have DECL_CHAIN tsubsted,
	 which would be skipped if cp_unevaluated_operand.  */
      cp_evaluated ev;

      /* Fix the type of 'this'.  */
      fntype = build_memfn_type (fntype, type,
				 type_memfn_quals (fntype),
				 type_memfn_rqual (fntype));
      tree fn, tmpl;
      if (oldtmpl)
	{
	  /* Generic lambda: substitute the TEMPLATE_DECL wrapping the
	     call operator.  */
	  tmpl = tsubst_template_decl (oldtmpl, args, complain, fntype);
	  if (tmpl == error_mark_node)
	    {
	      r = error_mark_node;
	      goto out;
	    }
	  fn = DECL_TEMPLATE_RESULT (tmpl);
	  finish_member_declaration (tmpl);
	}
      else
	{
	  tmpl = NULL_TREE;
	  fn = tsubst_function_decl (oldfn, args, complain, fntype);
	  if (fn == error_mark_node)
	    {
	      r = error_mark_node;
	      goto out;
	    }
	  finish_member_declaration (fn);
	}

      if (tree ci = get_constraints (oldfn))
	{
	  /* Substitute into the lambda's constraints.
*/
	  if (oldtmpl)
	    ++processing_template_decl;
	  ci = tsubst_constraint_info (ci, args, complain, in_decl);
	  if (oldtmpl)
	    --processing_template_decl;
	  set_constraints (fn, ci);
	}

      /* Let finish_function set this.  */
      DECL_DECLARED_CONSTEXPR_P (fn) = false;

      bool nested = cfun;
      if (nested)
	push_function_context ();
      else
	/* Still increment function_depth so that we don't GC in the
	   middle of an expression.  */
	++function_depth;

      local_specialization_stack s (lss_copy);

      tree body = start_lambda_function (fn, r);

      /* Now record them for lookup_init_capture_pack.  */
      int fplen = vec_safe_length (field_packs);
      for (int i = 0; i < fplen; )
	{
	  /* field_packs holds (pattern, instantiation) pairs pushed above.  */
	  tree pack = (*field_packs)[i++];
	  tree inst = (*field_packs)[i++];
	  register_local_specialization (inst, pack);
	}
      release_tree_vector (field_packs);

      register_parameter_specializations (oldfn, fn);

      if (oldtmpl)
	{
	  /* We might not partially instantiate some parts of the function, so
	     copy these flags from the original template.  */
	  language_function *ol = DECL_STRUCT_FUNCTION (oldfn)->language;
	  current_function_returns_value = ol->returns_value;
	  current_function_returns_null = ol->returns_null;
	  current_function_returns_abnormally = ol->returns_abnormally;
	  current_function_infinite_loop = ol->infinite_loop;
	}

      /* [temp.deduct] A lambda-expression appearing in a function type or a
	 template parameter is not considered part of the immediate context for
	 the purposes of template argument deduction.  */
      complain = tf_warning_or_error;

      tsubst_expr (DECL_SAVED_TREE (oldfn), args, complain, r,
		   /*constexpr*/false);

      finish_lambda_function (body);

      if (nested)
	pop_function_context ();
      else
	--function_depth;

      /* The capture list was built up in reverse order; fix that now.
*/
      LAMBDA_EXPR_CAPTURE_LIST (r)
	= nreverse (LAMBDA_EXPR_CAPTURE_LIST (r));

      LAMBDA_EXPR_THIS_CAPTURE (r) = NULL_TREE;

      maybe_add_lambda_conv_op (type);
    }

 out:
  finish_struct (type, /*attr*/NULL_TREE);

  insert_pending_capture_proxies ();

  return r;
}

/* Like tsubst but deals with expressions and performs semantic analysis.
   FUNCTION_P is true if T is the "F" in "F (ARGS)" or "F<TARGS> (ARGS)".  */

tree
tsubst_copy_and_build (tree t,
		       tree args,
		       tsubst_flags_t complain,
		       tree in_decl,
		       bool function_p,
		       bool integral_constant_expression_p)
{
#define RETURN(EXP) do { retval = (EXP); goto out; } while(0)
#define RECUR(NODE)						\
  tsubst_copy_and_build (NODE, args, complain, in_decl,		\
			 /*function_p=*/false,			\
			 integral_constant_expression_p)

  tree retval, op1;
  location_t save_loc;

  if (t == NULL_TREE || t == error_mark_node)
    return t;

  /* Point diagnostics at the expression being substituted.  */
  save_loc = input_location;
  if (location_t eloc = cp_expr_location (t))
    input_location = eloc;

  /* N3276 decltype magic only applies to calls at the top level or on the
     right side of a comma.  */
  tsubst_flags_t decltype_flag = (complain & tf_decltype);
  complain &= ~tf_decltype;

  switch (TREE_CODE (t))
    {
    case USING_DECL:
      t = DECL_NAME (t);
      /* Fall through.  */
    case IDENTIFIER_NODE:
      {
	tree decl;
	cp_id_kind idk;
	bool non_integral_constant_expression_p;
	const char *error_msg;

	if (IDENTIFIER_CONV_OP_P (t))
	  {
	    /* Conversion-operator names embed a type; rebuild the name
	       from the substituted type.  */
	    tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl);
	    t = make_conv_op_name (new_type);
	  }

	/* Look up the name.  */
	decl = lookup_name (t);

	/* By convention, expressions use ERROR_MARK_NODE to indicate
	   failure, not NULL_TREE.
*/ if (decl == NULL_TREE) decl = error_mark_node; decl = finish_id_expression (t, decl, NULL_TREE, &idk, integral_constant_expression_p, /*allow_non_integral_constant_expression_p=*/(cxx_dialect >= cxx11), &non_integral_constant_expression_p, /*template_p=*/false, /*done=*/true, /*address_p=*/false, /*template_arg_p=*/false, &error_msg, input_location); if (error_msg) error (error_msg); if (!function_p && identifier_p (decl)) { if (complain & tf_error) unqualified_name_lookup_error (decl); decl = error_mark_node; } RETURN (decl); } case TEMPLATE_ID_EXPR: { tree object; tree templ = tsubst_copy_and_build (TREE_OPERAND (t, 0), args, complain, in_decl, function_p, integral_constant_expression_p); tree targs = TREE_OPERAND (t, 1); if (targs) targs = tsubst_template_args (targs, args, complain, in_decl); if (targs == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (templ) == SCOPE_REF) { tree name = TREE_OPERAND (templ, 1); tree tid = lookup_template_function (name, targs); TREE_OPERAND (templ, 1) = tid; RETURN (templ); } if (concept_definition_p (templ)) { tree check = build_concept_check (templ, targs, complain); if (check == error_mark_node) RETURN (error_mark_node); tree id = unpack_concept_check (check); /* If we built a function concept check, return the underlying template-id. So we can evaluate it as a function call. 
*/ if (function_concept_p (TREE_OPERAND (id, 0))) RETURN (id); RETURN (check); } if (variable_template_p (templ)) { tree r = lookup_and_finish_template_variable (templ, targs, complain); r = maybe_wrap_with_location (r, EXPR_LOCATION (t)); RETURN (r); } if (TREE_CODE (templ) == COMPONENT_REF) { object = TREE_OPERAND (templ, 0); templ = TREE_OPERAND (templ, 1); } else object = NULL_TREE; tree tid = lookup_template_function (templ, targs); if (object) RETURN (build3 (COMPONENT_REF, TREE_TYPE (tid), object, tid, NULL_TREE)); else if (identifier_p (templ)) { /* C++20 P0846: we can encounter an IDENTIFIER_NODE here when name lookup found nothing when parsing the template name. */ gcc_assert (cxx_dialect >= cxx20 || seen_error ()); RETURN (tid); } else RETURN (baselink_for_fns (tid)); } case INDIRECT_REF: { tree r = RECUR (TREE_OPERAND (t, 0)); if (REFERENCE_REF_P (t)) { /* A type conversion to reference type will be enclosed in such an indirect ref, but the substitution of the cast will have also added such an indirect ref. */ r = convert_from_reference (r); } else r = build_x_indirect_ref (input_location, r, RO_UNARY_STAR, complain|decltype_flag); if (REF_PARENTHESIZED_P (t)) r = force_paren_expr (r); RETURN (r); } case NOP_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = RECUR (TREE_OPERAND (t, 0)); RETURN (build_nop (type, op0)); } case IMPLICIT_CONV_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree expr = RECUR (TREE_OPERAND (t, 0)); if (dependent_type_p (type) || type_dependent_expression_p (expr)) { retval = copy_node (t); TREE_TYPE (retval) = type; TREE_OPERAND (retval, 0) = expr; RETURN (retval); } if (IMPLICIT_CONV_EXPR_NONTYPE_ARG (t)) /* We'll pass this to convert_nontype_argument again, we don't need to actually perform any conversion here. 
*/ RETURN (expr); int flags = LOOKUP_IMPLICIT; if (IMPLICIT_CONV_EXPR_DIRECT_INIT (t)) flags = LOOKUP_NORMAL; if (IMPLICIT_CONV_EXPR_BRACED_INIT (t)) flags |= LOOKUP_NO_NARROWING; RETURN (perform_implicit_conversion_flags (type, expr, complain, flags)); } case CONVERT_EXPR: { tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); tree op0 = RECUR (TREE_OPERAND (t, 0)); if (op0 == error_mark_node) RETURN (error_mark_node); RETURN (build1 (CONVERT_EXPR, type, op0)); } case CAST_EXPR: case REINTERPRET_CAST_EXPR: case CONST_CAST_EXPR: case DYNAMIC_CAST_EXPR: case STATIC_CAST_EXPR: { tree type; tree op, r = NULL_TREE; type = tsubst (TREE_TYPE (t), args, complain, in_decl); if (integral_constant_expression_p && !cast_valid_in_integral_constant_expression_p (type)) { if (complain & tf_error) error ("a cast to a type other than an integral or " "enumeration type cannot appear in a constant-expression"); RETURN (error_mark_node); } op = RECUR (TREE_OPERAND (t, 0)); warning_sentinel s(warn_useless_cast); warning_sentinel s2(warn_ignored_qualifiers); switch (TREE_CODE (t)) { case CAST_EXPR: r = build_functional_cast (input_location, type, op, complain); break; case REINTERPRET_CAST_EXPR: r = build_reinterpret_cast (input_location, type, op, complain); break; case CONST_CAST_EXPR: r = build_const_cast (input_location, type, op, complain); break; case DYNAMIC_CAST_EXPR: r = build_dynamic_cast (input_location, type, op, complain); break; case STATIC_CAST_EXPR: r = build_static_cast (input_location, type, op, complain); if (IMPLICIT_RVALUE_P (t)) set_implicit_rvalue_p (r); break; default: gcc_unreachable (); } RETURN (r); } case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); RETURN (build_x_unary_op (input_location, TREE_CODE (t), op1, complain|decltype_flag)); case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case NEGATE_EXPR: case BIT_NOT_EXPR: case ABS_EXPR: case TRUTH_NOT_EXPR: case 
UNARY_PLUS_EXPR: /* Unary + */ case REALPART_EXPR: case IMAGPART_EXPR: RETURN (build_x_unary_op (input_location, TREE_CODE (t), RECUR (TREE_OPERAND (t, 0)), complain|decltype_flag)); case FIX_TRUNC_EXPR: gcc_unreachable (); case ADDR_EXPR: op1 = TREE_OPERAND (t, 0); if (TREE_CODE (op1) == LABEL_DECL) RETURN (finish_label_address_expr (DECL_NAME (op1), EXPR_LOCATION (op1))); if (TREE_CODE (op1) == SCOPE_REF) op1 = tsubst_qualified_id (op1, args, complain, in_decl, /*done=*/true, /*address_p=*/true); else op1 = tsubst_non_call_postfix_expression (op1, args, complain, in_decl); RETURN (build_x_unary_op (input_location, ADDR_EXPR, op1, complain|decltype_flag)); case PLUS_EXPR: case MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case EXACT_DIV_EXPR: case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case TRUNC_MOD_EXPR: case FLOOR_MOD_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case RSHIFT_EXPR: case LSHIFT_EXPR: case EQ_EXPR: case NE_EXPR: case MAX_EXPR: case MIN_EXPR: case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR: case SPACESHIP_EXPR: case MEMBER_REF: case DOTSTAR_EXPR: { /* If T was type-dependent, suppress warnings that depend on the range of the types involved. */ bool was_dep = type_dependent_expression_p_push (t); tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); warning_sentinel s1(warn_type_limits, was_dep); warning_sentinel s2(warn_div_by_zero, was_dep); warning_sentinel s3(warn_logical_op, was_dep); warning_sentinel s4(warn_tautological_compare, was_dep); tree r = build_x_binary_op (input_location, TREE_CODE (t), op0, (TREE_NO_WARNING (TREE_OPERAND (t, 0)) ? ERROR_MARK : TREE_CODE (TREE_OPERAND (t, 0))), op1, (TREE_NO_WARNING (TREE_OPERAND (t, 1)) ? 
ERROR_MARK : TREE_CODE (TREE_OPERAND (t, 1))), /*overload=*/NULL, complain|decltype_flag); if (EXPR_P (r) && TREE_NO_WARNING (t)) TREE_NO_WARNING (r) = TREE_NO_WARNING (t); RETURN (r); } case POINTER_PLUS_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); if (op0 == error_mark_node) RETURN (error_mark_node); tree op1 = RECUR (TREE_OPERAND (t, 1)); if (op1 == error_mark_node) RETURN (error_mark_node); RETURN (fold_build_pointer_plus (op0, op1)); } case SCOPE_REF: RETURN (tsubst_qualified_id (t, args, complain, in_decl, /*done=*/true, /*address_p=*/false)); case ARRAY_REF: op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); RETURN (build_x_array_ref (EXPR_LOCATION (t), op1, RECUR (TREE_OPERAND (t, 1)), complain|decltype_flag)); case SIZEOF_EXPR: if (PACK_EXPANSION_P (TREE_OPERAND (t, 0)) || ARGUMENT_PACK_P (TREE_OPERAND (t, 0))) RETURN (tsubst_copy (t, args, complain, in_decl)); /* Fall through */ case ALIGNOF_EXPR: { tree r; op1 = TREE_OPERAND (t, 0); if (TREE_CODE (t) == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (t)) op1 = TREE_TYPE (op1); bool std_alignof = (TREE_CODE (t) == ALIGNOF_EXPR && ALIGNOF_EXPR_STD_P (t)); if (!args) { /* When there are no ARGS, we are trying to evaluate a non-dependent expression from the parser. Trying to do the substitutions may not work. 
*/ if (!TYPE_P (op1)) op1 = TREE_TYPE (op1); } else { ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; if (TYPE_P (op1)) op1 = tsubst (op1, args, complain, in_decl); else op1 = tsubst_copy_and_build (op1, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/ false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; } if (TYPE_P (op1)) r = cxx_sizeof_or_alignof_type (input_location, op1, TREE_CODE (t), std_alignof, complain & tf_error); else r = cxx_sizeof_or_alignof_expr (input_location, op1, TREE_CODE (t), complain & tf_error); if (TREE_CODE (t) == SIZEOF_EXPR && r != error_mark_node) { if (TREE_CODE (r) != SIZEOF_EXPR || TYPE_P (op1)) { if (!processing_template_decl && TYPE_P (op1)) { r = build_min (SIZEOF_EXPR, size_type_node, build1 (NOP_EXPR, op1, error_mark_node)); SIZEOF_EXPR_TYPE_P (r) = 1; } else r = build_min (SIZEOF_EXPR, size_type_node, op1); TREE_SIDE_EFFECTS (r) = 0; TREE_READONLY (r) = 1; } SET_EXPR_LOCATION (r, EXPR_LOCATION (t)); } RETURN (r); } case AT_ENCODE_EXPR: { op1 = TREE_OPERAND (t, 0); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; op1 = tsubst_copy_and_build (op1, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; RETURN (objc_build_encode_expr (op1)); } case NOEXCEPT_EXPR: op1 = TREE_OPERAND (t, 0); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; ++cp_noexcept_operand; op1 = tsubst_copy_and_build (op1, args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; --cp_noexcept_operand; RETURN (finish_noexcept_expr (op1, complain)); case MODOP_EXPR: { warning_sentinel s(warn_div_by_zero); tree lhs = RECUR (TREE_OPERAND (t, 0)); tree rhs = RECUR (TREE_OPERAND (t, 2)); tree r = build_x_modify_expr (EXPR_LOCATION (t), lhs, TREE_CODE (TREE_OPERAND (t, 1)), rhs, complain|decltype_flag); /* 
TREE_NO_WARNING must be set if either the expression was parenthesized or it uses an operator such as >>= rather than plain assignment. In the former case, it was already set and must be copied. In the latter case, build_x_modify_expr sets it and it must not be reset here. */ if (TREE_NO_WARNING (t)) TREE_NO_WARNING (r) = TREE_NO_WARNING (t); RETURN (r); } case ARROW_EXPR: op1 = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); /* Remember that there was a reference to this entity. */ if (DECL_P (op1) && !mark_used (op1, complain) && !(complain & tf_error)) RETURN (error_mark_node); RETURN (build_x_arrow (input_location, op1, complain)); case NEW_EXPR: { tree placement = RECUR (TREE_OPERAND (t, 0)); tree init = RECUR (TREE_OPERAND (t, 3)); vec<tree, va_gc> *placement_vec; vec<tree, va_gc> *init_vec; tree ret; location_t loc = EXPR_LOCATION (t); if (placement == NULL_TREE) placement_vec = NULL; else if (placement == error_mark_node) RETURN (error_mark_node); else { placement_vec = make_tree_vector (); for (; placement != NULL_TREE; placement = TREE_CHAIN (placement)) vec_safe_push (placement_vec, TREE_VALUE (placement)); } /* If there was an initializer in the original tree, but it instantiated to an empty list, then we should pass a non-NULL empty vector to tell build_new that it was an empty initializer() rather than no initializer. This can only happen when the initializer is a pack expansion whose parameter packs are of length zero. */ if (init == NULL_TREE && TREE_OPERAND (t, 3) == NULL_TREE) init_vec = NULL; else { init_vec = make_tree_vector (); if (init == void_node) gcc_assert (init_vec != NULL); else { for (; init != NULL_TREE; init = TREE_CHAIN (init)) vec_safe_push (init_vec, TREE_VALUE (init)); } } /* Avoid passing an enclosing decl to valid_array_size_p. 
*/ in_decl = NULL_TREE; tree op1 = tsubst (TREE_OPERAND (t, 1), args, complain, in_decl); tree op2 = RECUR (TREE_OPERAND (t, 2)); ret = build_new (loc, &placement_vec, op1, op2, &init_vec, NEW_EXPR_USE_GLOBAL (t), complain); if (placement_vec != NULL) release_tree_vector (placement_vec); if (init_vec != NULL) release_tree_vector (init_vec); RETURN (ret); } case DELETE_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); RETURN (delete_sanity (input_location, op0, op1, DELETE_EXPR_USE_VEC (t), DELETE_EXPR_USE_GLOBAL (t), complain)); } case COMPOUND_EXPR: { tree op0 = tsubst_copy_and_build (TREE_OPERAND (t, 0), args, complain & ~tf_decltype, in_decl, /*function_p=*/false, integral_constant_expression_p); RETURN (build_x_compound_expr (EXPR_LOCATION (t), op0, RECUR (TREE_OPERAND (t, 1)), complain|decltype_flag)); } case CALL_EXPR: { tree function; unsigned int nargs, i; bool qualified_p; bool koenig_p; tree ret; function = CALL_EXPR_FN (t); /* Internal function with no arguments. */ if (function == NULL_TREE && call_expr_nargs (t) == 0) RETURN (t); /* When we parsed the expression, we determined whether or not Koenig lookup should be performed. */ koenig_p = KOENIG_LOOKUP_P (t); if (function == NULL_TREE) { koenig_p = false; qualified_p = false; } else if (TREE_CODE (function) == SCOPE_REF) { qualified_p = true; function = tsubst_qualified_id (function, args, complain, in_decl, /*done=*/false, /*address_p=*/false); } else if (koenig_p && identifier_p (function)) { /* Do nothing; calling tsubst_copy_and_build on an identifier would incorrectly perform unqualified lookup again. Note that we can also have an IDENTIFIER_NODE if the earlier unqualified lookup found a member function; in that case koenig_p will be false and we do want to do the lookup again to find the instantiated member function. FIXME but doing that causes c++/15272, so we need to stop using IDENTIFIER_NODE in that situation. 
*/ qualified_p = false; } else { if (TREE_CODE (function) == COMPONENT_REF) { tree op = TREE_OPERAND (function, 1); qualified_p = (TREE_CODE (op) == SCOPE_REF || (BASELINK_P (op) && BASELINK_QUALIFIED_P (op))); } else qualified_p = false; if (TREE_CODE (function) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL) /* Avoid error about taking the address of a constructor. */ function = TREE_OPERAND (function, 0); function = tsubst_copy_and_build (function, args, complain, in_decl, !qualified_p, integral_constant_expression_p); if (BASELINK_P (function)) qualified_p = true; } nargs = call_expr_nargs (t); releasing_vec call_args; for (i = 0; i < nargs; ++i) { tree arg = CALL_EXPR_ARG (t, i); if (!PACK_EXPANSION_P (arg)) vec_safe_push (call_args, RECUR (CALL_EXPR_ARG (t, i))); else { /* Expand the pack expansion and push each entry onto CALL_ARGS. */ arg = tsubst_pack_expansion (arg, args, complain, in_decl); if (TREE_CODE (arg) == TREE_VEC) { unsigned int len, j; len = TREE_VEC_LENGTH (arg); for (j = 0; j < len; ++j) { tree value = TREE_VEC_ELT (arg, j); if (value != NULL_TREE) value = convert_from_reference (value); vec_safe_push (call_args, value); } } else { /* A partial substitution. Add one entry. */ vec_safe_push (call_args, arg); } } } /* Stripped-down processing for a call in a thunk. Specifically, in the thunk template for a generic lambda. */ if (call_from_lambda_thunk_p (t)) { /* Now that we've expanded any packs, the number of call args might be different. */ unsigned int cargs = call_args->length (); tree thisarg = NULL_TREE; if (TREE_CODE (function) == COMPONENT_REF) { thisarg = TREE_OPERAND (function, 0); if (TREE_CODE (thisarg) == INDIRECT_REF) thisarg = TREE_OPERAND (thisarg, 0); function = TREE_OPERAND (function, 1); if (TREE_CODE (function) == BASELINK) function = BASELINK_FUNCTIONS (function); } /* We aren't going to do normal overload resolution, so force the template-id to resolve. 
*/ function = resolve_nondeduced_context (function, complain); for (unsigned i = 0; i < cargs; ++i) { /* In a thunk, pass through args directly, without any conversions. */ tree arg = (*call_args)[i]; while (TREE_CODE (arg) != PARM_DECL) arg = TREE_OPERAND (arg, 0); (*call_args)[i] = arg; } if (thisarg) { /* If there are no other args, just push 'this'. */ if (cargs == 0) vec_safe_push (call_args, thisarg); else { /* Otherwise, shift the other args over to make room. */ tree last = (*call_args)[cargs - 1]; vec_safe_push (call_args, last); for (int i = cargs - 1; i > 0; --i) (*call_args)[i] = (*call_args)[i - 1]; (*call_args)[0] = thisarg; } } ret = build_call_a (function, call_args->length (), call_args->address ()); /* The thunk location is not interesting. */ SET_EXPR_LOCATION (ret, UNKNOWN_LOCATION); CALL_FROM_THUNK_P (ret) = true; if (CLASS_TYPE_P (TREE_TYPE (ret))) CALL_EXPR_RETURN_SLOT_OPT (ret) = true; RETURN (ret); } /* We do not perform argument-dependent lookup if normal lookup finds a non-function, in accordance with the resolution of DR 218. */ if (koenig_p && ((is_overloaded_fn (function) /* If lookup found a member function, the Koenig lookup is not appropriate, even if an unqualified-name was used to denote the function. */ && !DECL_FUNCTION_MEMBER_P (get_first_fn (function))) || identifier_p (function) /* C++20 P0846: Lookup found nothing. */ || (TREE_CODE (function) == TEMPLATE_ID_EXPR && identifier_p (TREE_OPERAND (function, 0)))) /* Only do this when substitution turns a dependent call into a non-dependent call. 
*/ && type_dependent_expression_p_push (t) && !any_type_dependent_arguments_p (call_args)) function = perform_koenig_lookup (function, call_args, tf_none); if (function != NULL_TREE && (identifier_p (function) || (TREE_CODE (function) == TEMPLATE_ID_EXPR && identifier_p (TREE_OPERAND (function, 0)))) && !any_type_dependent_arguments_p (call_args)) { if (TREE_CODE (function) == TEMPLATE_ID_EXPR) function = TREE_OPERAND (function, 0); if (koenig_p && (complain & tf_warning_or_error)) { /* For backwards compatibility and good diagnostics, try the unqualified lookup again if we aren't in SFINAE context. */ tree unq = (tsubst_copy_and_build (function, args, complain, in_decl, true, integral_constant_expression_p)); if (unq == error_mark_node) RETURN (error_mark_node); if (unq != function) { /* In a lambda fn, we have to be careful to not introduce new this captures. Legacy code can't be using lambdas anyway, so it's ok to be stricter. */ bool in_lambda = (current_class_type && LAMBDA_TYPE_P (current_class_type)); char const *const msg = G_("%qD was not declared in this scope, " "and no declarations were found by " "argument-dependent lookup at the point " "of instantiation"); bool diag = true; if (in_lambda) error_at (cp_expr_loc_or_input_loc (t), msg, function); else diag = permerror (cp_expr_loc_or_input_loc (t), msg, function); if (diag) { tree fn = unq; if (INDIRECT_REF_P (fn)) fn = TREE_OPERAND (fn, 0); if (is_overloaded_fn (fn)) fn = get_first_fn (fn); if (!DECL_P (fn)) /* Can't say anything more. 
*/; else if (DECL_CLASS_SCOPE_P (fn)) { location_t loc = cp_expr_loc_or_input_loc (t); inform (loc, "declarations in dependent base %qT are " "not found by unqualified lookup", DECL_CLASS_CONTEXT (fn)); if (current_class_ptr) inform (loc, "use %<this->%D%> instead", function); else inform (loc, "use %<%T::%D%> instead", current_class_name, function); } else inform (DECL_SOURCE_LOCATION (fn), "%qD declared here, later in the " "translation unit", fn); if (in_lambda) RETURN (error_mark_node); } function = unq; } } if (identifier_p (function)) { if (complain & tf_error) unqualified_name_lookup_error (function); RETURN (error_mark_node); } } /* Remember that there was a reference to this entity. */ if (function != NULL_TREE && DECL_P (function) && !mark_used (function, complain) && !(complain & tf_error)) RETURN (error_mark_node); /* Put back tf_decltype for the actual call. */ complain |= decltype_flag; if (function == NULL_TREE) switch (CALL_EXPR_IFN (t)) { case IFN_LAUNDER: gcc_assert (nargs == 1); if (vec_safe_length (call_args) != 1) { error_at (cp_expr_loc_or_input_loc (t), "wrong number of arguments to " "%<__builtin_launder%>"); ret = error_mark_node; } else ret = finish_builtin_launder (cp_expr_loc_or_input_loc (t), (*call_args)[0], complain); break; case IFN_VEC_CONVERT: gcc_assert (nargs == 1); if (vec_safe_length (call_args) != 1) { error_at (cp_expr_loc_or_input_loc (t), "wrong number of arguments to " "%<__builtin_convertvector%>"); ret = error_mark_node; break; } ret = cp_build_vec_convert ((*call_args)[0], input_location, tsubst (TREE_TYPE (t), args, complain, in_decl), complain); if (TREE_CODE (ret) == VIEW_CONVERT_EXPR) RETURN (ret); break; default: /* Unsupported internal function with arguments. 
*/ gcc_unreachable (); } else if (TREE_CODE (function) == OFFSET_REF || TREE_CODE (function) == DOTSTAR_EXPR || TREE_CODE (function) == MEMBER_REF) ret = build_offset_ref_call_from_tree (function, &call_args, complain); else if (TREE_CODE (function) == COMPONENT_REF) { tree instance = TREE_OPERAND (function, 0); tree fn = TREE_OPERAND (function, 1); if (processing_template_decl && (type_dependent_expression_p (instance) || (!BASELINK_P (fn) && TREE_CODE (fn) != FIELD_DECL) || type_dependent_expression_p (fn) || any_type_dependent_arguments_p (call_args))) ret = build_min_nt_call_vec (function, call_args); else if (!BASELINK_P (fn)) ret = finish_call_expr (function, &call_args, /*disallow_virtual=*/false, /*koenig_p=*/false, complain); else ret = (build_new_method_call (instance, fn, &call_args, NULL_TREE, qualified_p ? LOOKUP_NONVIRTUAL : LOOKUP_NORMAL, /*fn_p=*/NULL, complain)); } else if (concept_check_p (function)) { /* FUNCTION is a template-id referring to a concept definition. */ tree id = unpack_concept_check (function); tree tmpl = TREE_OPERAND (id, 0); tree args = TREE_OPERAND (id, 1); /* Calls to standard and variable concepts should have been previously diagnosed. */ gcc_assert (function_concept_p (tmpl)); /* Ensure the result is wrapped as a call expression. 
*/ ret = build_concept_check (tmpl, args, tf_warning_or_error); } else ret = finish_call_expr (function, &call_args, /*disallow_virtual=*/qualified_p, koenig_p, complain); if (ret != error_mark_node) { bool op = CALL_EXPR_OPERATOR_SYNTAX (t); bool ord = CALL_EXPR_ORDERED_ARGS (t); bool rev = CALL_EXPR_REVERSE_ARGS (t); if (op || ord || rev) { function = extract_call_expr (ret); CALL_EXPR_OPERATOR_SYNTAX (function) = op; CALL_EXPR_ORDERED_ARGS (function) = ord; CALL_EXPR_REVERSE_ARGS (function) = rev; } } RETURN (ret); } case COND_EXPR: { tree cond = RECUR (TREE_OPERAND (t, 0)); cond = mark_rvalue_use (cond); tree folded_cond = fold_non_dependent_expr (cond, complain); tree exp1, exp2; if (TREE_CODE (folded_cond) == INTEGER_CST) { if (integer_zerop (folded_cond)) { ++c_inhibit_evaluation_warnings; exp1 = RECUR (TREE_OPERAND (t, 1)); --c_inhibit_evaluation_warnings; exp2 = RECUR (TREE_OPERAND (t, 2)); } else { exp1 = RECUR (TREE_OPERAND (t, 1)); ++c_inhibit_evaluation_warnings; exp2 = RECUR (TREE_OPERAND (t, 2)); --c_inhibit_evaluation_warnings; } cond = folded_cond; } else { exp1 = RECUR (TREE_OPERAND (t, 1)); exp2 = RECUR (TREE_OPERAND (t, 2)); } warning_sentinel s(warn_duplicated_branches); RETURN (build_x_conditional_expr (EXPR_LOCATION (t), cond, exp1, exp2, complain)); } case PSEUDO_DTOR_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); tree op2 = tsubst (TREE_OPERAND (t, 2), args, complain, in_decl); RETURN (finish_pseudo_destructor_expr (op0, op1, op2, input_location)); } case TREE_LIST: RETURN (tsubst_tree_list (t, args, complain, in_decl)); case COMPONENT_REF: { tree object; tree object_type; tree member; tree r; object = tsubst_non_call_postfix_expression (TREE_OPERAND (t, 0), args, complain, in_decl); /* Remember that there was a reference to this entity. 
*/ if (DECL_P (object) && !mark_used (object, complain) && !(complain & tf_error)) RETURN (error_mark_node); object_type = TREE_TYPE (object); member = TREE_OPERAND (t, 1); if (BASELINK_P (member)) member = tsubst_baselink (member, non_reference (TREE_TYPE (object)), args, complain, in_decl); else member = tsubst_copy (member, args, complain, in_decl); if (member == error_mark_node) RETURN (error_mark_node); if (TREE_CODE (member) == FIELD_DECL) { r = finish_non_static_data_member (member, object, NULL_TREE); if (TREE_CODE (r) == COMPONENT_REF) REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t); RETURN (r); } else if (type_dependent_expression_p (object)) /* We can't do much here. */; else if (!CLASS_TYPE_P (object_type)) { if (scalarish_type_p (object_type)) { tree s = NULL_TREE; tree dtor = member; if (TREE_CODE (dtor) == SCOPE_REF) { s = TREE_OPERAND (dtor, 0); dtor = TREE_OPERAND (dtor, 1); } if (TREE_CODE (dtor) == BIT_NOT_EXPR) { dtor = TREE_OPERAND (dtor, 0); if (TYPE_P (dtor)) RETURN (finish_pseudo_destructor_expr (object, s, dtor, input_location)); } } } else if (TREE_CODE (member) == SCOPE_REF && TREE_CODE (TREE_OPERAND (member, 1)) == TEMPLATE_ID_EXPR) { /* Lookup the template functions now that we know what the scope is. 
*/ tree scope = TREE_OPERAND (member, 0); tree tmpl = TREE_OPERAND (TREE_OPERAND (member, 1), 0); tree args = TREE_OPERAND (TREE_OPERAND (member, 1), 1); member = lookup_qualified_name (scope, tmpl, LOOK_want::NORMAL, /*complain=*/false); if (BASELINK_P (member)) { BASELINK_FUNCTIONS (member) = build_nt (TEMPLATE_ID_EXPR, BASELINK_FUNCTIONS (member), args); member = (adjust_result_of_qualified_name_lookup (member, BINFO_TYPE (BASELINK_BINFO (member)), object_type)); } else { qualified_name_lookup_error (scope, tmpl, member, input_location); RETURN (error_mark_node); } } else if (TREE_CODE (member) == SCOPE_REF && !CLASS_TYPE_P (TREE_OPERAND (member, 0)) && TREE_CODE (TREE_OPERAND (member, 0)) != NAMESPACE_DECL) { if (complain & tf_error) { if (TYPE_P (TREE_OPERAND (member, 0))) error ("%qT is not a class or namespace", TREE_OPERAND (member, 0)); else error ("%qD is not a class or namespace", TREE_OPERAND (member, 0)); } RETURN (error_mark_node); } r = finish_class_member_access_expr (object, member, /*template_p=*/false, complain); if (TREE_CODE (r) == COMPONENT_REF) REF_PARENTHESIZED_P (r) = REF_PARENTHESIZED_P (t); RETURN (r); } case THROW_EXPR: RETURN (build_throw (input_location, RECUR (TREE_OPERAND (t, 0)))); case CONSTRUCTOR: { vec<constructor_elt, va_gc> *n; constructor_elt *ce; unsigned HOST_WIDE_INT idx; tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); bool process_index_p; int newlen; bool need_copy_p = false; tree r; if (type == error_mark_node) RETURN (error_mark_node); /* We do not want to process the index of aggregate initializers as they are identifier nodes which will be looked up by digest_init. 
*/ process_index_p = !(type && MAYBE_CLASS_TYPE_P (type)); if (null_member_pointer_value_p (t)) { gcc_assert (same_type_p (type, TREE_TYPE (t))); RETURN (t); } n = vec_safe_copy (CONSTRUCTOR_ELTS (t)); newlen = vec_safe_length (n); FOR_EACH_VEC_SAFE_ELT (n, idx, ce) { if (ce->index && process_index_p /* An identifier index is looked up in the type being initialized, not the current scope. */ && TREE_CODE (ce->index) != IDENTIFIER_NODE) ce->index = RECUR (ce->index); if (PACK_EXPANSION_P (ce->value)) { /* Substitute into the pack expansion. */ ce->value = tsubst_pack_expansion (ce->value, args, complain, in_decl); if (ce->value == error_mark_node || PACK_EXPANSION_P (ce->value)) ; else if (TREE_VEC_LENGTH (ce->value) == 1) /* Just move the argument into place. */ ce->value = TREE_VEC_ELT (ce->value, 0); else { /* Update the length of the final CONSTRUCTOR arguments vector, and note that we will need to copy.*/ newlen = newlen + TREE_VEC_LENGTH (ce->value) - 1; need_copy_p = true; } } else ce->value = RECUR (ce->value); } if (need_copy_p) { vec<constructor_elt, va_gc> *old_n = n; vec_alloc (n, newlen); FOR_EACH_VEC_ELT (*old_n, idx, ce) { if (TREE_CODE (ce->value) == TREE_VEC) { int i, len = TREE_VEC_LENGTH (ce->value); for (i = 0; i < len; ++i) CONSTRUCTOR_APPEND_ELT (n, 0, TREE_VEC_ELT (ce->value, i)); } else CONSTRUCTOR_APPEND_ELT (n, 0, ce->value); } } r = build_constructor (init_list_type_node, n); CONSTRUCTOR_IS_DIRECT_INIT (r) = CONSTRUCTOR_IS_DIRECT_INIT (t); CONSTRUCTOR_IS_DESIGNATED_INIT (r) = CONSTRUCTOR_IS_DESIGNATED_INIT (t); if (TREE_HAS_CONSTRUCTOR (t)) { fcl_t cl = fcl_functional; if (CONSTRUCTOR_C99_COMPOUND_LITERAL (t)) cl = fcl_c99; RETURN (finish_compound_literal (type, r, complain, cl)); } TREE_TYPE (r) = type; RETURN (r); } case TYPEID_EXPR: { tree operand_0 = TREE_OPERAND (t, 0); if (TYPE_P (operand_0)) { operand_0 = tsubst (operand_0, args, complain, in_decl); RETURN (get_typeid (operand_0, complain)); } else { operand_0 = RECUR (operand_0); 
RETURN (build_typeid (operand_0, complain)); } } case VAR_DECL: if (!args) RETURN (t); /* Fall through */ case PARM_DECL: { tree r = tsubst_copy (t, args, complain, in_decl); /* ??? We're doing a subset of finish_id_expression here. */ if (tree wrap = maybe_get_tls_wrapper_call (r)) /* Replace an evaluated use of the thread_local variable with a call to its wrapper. */ r = wrap; else if (outer_automatic_var_p (r)) r = process_outer_var_ref (r, complain); if (!TYPE_REF_P (TREE_TYPE (t))) /* If the original type was a reference, we'll be wrapped in the appropriate INDIRECT_REF. */ r = convert_from_reference (r); RETURN (r); } case VA_ARG_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); RETURN (build_x_va_arg (EXPR_LOCATION (t), op0, type)); } case OFFSETOF_EXPR: { tree object_ptr = tsubst_copy_and_build (TREE_OPERAND (t, 1), args, complain, in_decl, /*function_p=*/false, /*integral_constant_expression_p=*/false); RETURN (finish_offsetof (object_ptr, RECUR (TREE_OPERAND (t, 0)), EXPR_LOCATION (t))); } case ADDRESSOF_EXPR: RETURN (cp_build_addressof (EXPR_LOCATION (t), RECUR (TREE_OPERAND (t, 0)), complain)); case TRAIT_EXPR: { tree type1 = tsubst (TRAIT_EXPR_TYPE1 (t), args, complain, in_decl); tree type2 = tsubst (TRAIT_EXPR_TYPE2 (t), args, complain, in_decl); RETURN (finish_trait_expr (TRAIT_EXPR_LOCATION (t), TRAIT_EXPR_KIND (t), type1, type2)); } case STMT_EXPR: { tree old_stmt_expr = cur_stmt_expr; tree stmt_expr = begin_stmt_expr (); cur_stmt_expr = stmt_expr; tsubst_expr (STMT_EXPR_STMT (t), args, complain, in_decl, integral_constant_expression_p); stmt_expr = finish_stmt_expr (stmt_expr, false); cur_stmt_expr = old_stmt_expr; /* If the resulting list of expression statement is empty, fold it further into void_node. 
*/ if (empty_expr_stmt_p (stmt_expr)) stmt_expr = void_node; RETURN (stmt_expr); } case LAMBDA_EXPR: { if (complain & tf_partial) { /* We don't have a full set of template arguments yet; don't touch the lambda at all. */ gcc_assert (processing_template_decl); return t; } tree r = tsubst_lambda_expr (t, args, complain, in_decl); RETURN (build_lambda_object (r)); } case TARGET_EXPR: /* We can get here for a constant initializer of non-dependent type. FIXME stop folding in cp_parser_initializer_clause. */ { tree r = get_target_expr_sfinae (RECUR (TARGET_EXPR_INITIAL (t)), complain); RETURN (r); } case TRANSACTION_EXPR: RETURN (tsubst_expr(t, args, complain, in_decl, integral_constant_expression_p)); case PAREN_EXPR: RETURN (finish_parenthesized_expr (RECUR (TREE_OPERAND (t, 0)))); case VEC_PERM_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); tree op1 = RECUR (TREE_OPERAND (t, 1)); tree op2 = RECUR (TREE_OPERAND (t, 2)); RETURN (build_x_vec_perm_expr (input_location, op0, op1, op2, complain)); } case REQUIRES_EXPR: { tree r = tsubst_requires_expr (t, args, tf_none, in_decl); RETURN (r); } case RANGE_EXPR: /* No need to substitute further, a RANGE_EXPR will always be built with constant operands. */ RETURN (t); case NON_LVALUE_EXPR: case VIEW_CONVERT_EXPR: if (location_wrapper_p (t)) /* We need to do this here as well as in tsubst_copy so we get the other tsubst_copy_and_build semantics for a PARM_DECL operand. */ RETURN (maybe_wrap_with_location (RECUR (TREE_OPERAND (t, 0)), EXPR_LOCATION (t))); /* fallthrough. */ default: /* Handle Objective-C++ constructs, if appropriate. */ { tree subst = objcp_tsubst_copy_and_build (t, args, complain, in_decl, /*function_p=*/false); if (subst) RETURN (subst); } RETURN (tsubst_copy (t, args, complain, in_decl)); } #undef RECUR #undef RETURN out: input_location = save_loc; return retval; } /* Verify that the instantiated ARGS are valid. For type arguments, make sure that the type's linkage is ok. 
For non-type arguments, make sure they are constants if they are
   integral or enumerations.  Emit an error under control of COMPLAIN,
   and return TRUE on error.  */

static bool
check_instantiated_arg (tree tmpl, tree t, tsubst_flags_t complain)
{
  if (dependent_template_arg_p (t))
    /* Still dependent: nothing can be checked yet.  */
    return false;
  if (ARGUMENT_PACK_P (t))
    {
      /* Check every element of the pack; accumulate the result rather
	 than returning early so that all diagnostics are emitted.  */
      tree vec = ARGUMENT_PACK_ARGS (t);
      int len = TREE_VEC_LENGTH (vec);
      bool result = false;
      int i;

      for (i = 0; i < len; ++i)
	if (check_instantiated_arg (tmpl, TREE_VEC_ELT (vec, i), complain))
	  result = true;
      return result;
    }
  else if (TYPE_P (t))
    {
      /* [basic.link]: A name with no linkage (notably, the name of a
	 class or enumeration declared in a local scope) shall not be
	 used to declare an entity with linkage.  This implies that
	 names with no linkage cannot be used as template arguments

	 DR 757 relaxes this restriction for C++0x.  */
      tree nt = (cxx_dialect > cxx98 ? NULL_TREE
		 : no_linkage_check (t, /*relaxed_p=*/false));

      if (nt)
	{
	  /* DR 488 makes use of a type with no linkage cause
	     type deduction to fail.  */
	  if (complain & tf_error)
	    {
	      if (TYPE_UNNAMED_P (nt))
		error ("%qT is/uses unnamed type", t);
	      else
		error ("template argument for %qD uses local type %qT",
		       tmpl, t);
	    }
	  return true;
	}
      /* In order to avoid all sorts of complications, we do not allow
	 variably-modified types as template arguments.  */
      else if (variably_modified_type_p (t, NULL_TREE))
	{
	  if (complain & tf_error)
	    error ("%qT is a variably modified type", t);
	  return true;
	}
    }
  /* Class template and alias template arguments should be OK.  */
  else if (DECL_TYPE_TEMPLATE_P (t))
    ;
  /* A non-type argument of integral or enumerated type must be a
     constant.  */
  else if (TREE_TYPE (t)
	   && INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (t))
	   && !REFERENCE_REF_P (t)
	   && !TREE_CONSTANT (t))
    {
      if (complain & tf_error)
	error ("integral expression %qE is not constant", t);
      return true;
    }
  return false;
}

/* Check all the arguments in ARGS (a TREE_VEC of DECL_NTPARMS (TMPL)
   entries) against TMPL via check_instantiated_arg.  Return TRUE on
   error; under tf_error also emit a trailing note naming TMPL.  */

static bool
check_instantiated_args (tree tmpl, tree args, tsubst_flags_t complain)
{
  int ix, len = DECL_NTPARMS (tmpl);
  bool result = false;

  for (ix = 0; ix != len; ix++)
    {
      if (check_instantiated_arg (tmpl, TREE_VEC_ELT (args, ix), complain))
	result = true;
    }
  if (result && (complain & tf_error))
    /* The leading space marks this as a continuation diagnostic.  */
    error (" trying to instantiate %qD", tmpl);
  return result;
}

/* We're out of SFINAE context now, so generate diagnostics for the access
   errors we saw earlier when instantiating D from TMPL and ARGS.  */

static void
recheck_decl_substitution (tree d, tree tmpl, tree args)
{
  tree pattern = DECL_TEMPLATE_RESULT (tmpl);
  tree type = TREE_TYPE (pattern);
  location_t loc = input_location;

  /* Redo the substitution with access checks enabled (dk_no_deferred)
     so the previously-deferred access errors are reported now, from
     the access scope of D and at the pattern's source location.  */
  push_access_scope (d);
  push_deferring_access_checks (dk_no_deferred);
  input_location = DECL_SOURCE_LOCATION (pattern);
  tsubst (type, args, tf_warning_or_error, d);
  input_location = loc;
  pop_deferring_access_checks ();
  pop_access_scope (d);
}

/* Instantiate the indicated variable, function, or alias template TMPL
   with the template arguments in TARG_PTR (passed as ORIG_ARGS).
   Returns the specialization, or error_mark_node on failure.  */

static tree
instantiate_template_1 (tree tmpl, tree orig_args, tsubst_flags_t complain)
{
  tree targ_ptr = orig_args;
  tree fndecl;
  tree gen_tmpl;
  tree spec;
  bool access_ok = true;

  if (tmpl == error_mark_node)
    return error_mark_node;

  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);

  /* If this function is a clone, handle it specially.  */
  if (DECL_CLONED_FUNCTION_P (tmpl))
    {
      tree spec;
      tree clone;

      /* Use DECL_ABSTRACT_ORIGIN because only FUNCTION_DECLs have
	 DECL_CLONED_FUNCTION.  */
      spec = instantiate_template (DECL_ABSTRACT_ORIGIN (tmpl),
				   targ_ptr, complain);
      if (spec == error_mark_node)
	return error_mark_node;

      /* Look for the clone.  */
      FOR_EACH_CLONE (clone, spec)
	if (DECL_NAME (clone) == DECL_NAME (tmpl))
	  return clone;
      /* We should always have found the clone by now.  */
      gcc_unreachable ();
      return NULL_TREE;
    }

  if (targ_ptr == error_mark_node)
    return error_mark_node;

  /* Check to see if we already have this specialization.  */
  gen_tmpl = most_general_template (tmpl);
  if (TMPL_ARGS_DEPTH (targ_ptr)
      < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)))
    /* targ_ptr only has the innermost template args, so add the outer
       ones from tmpl, which could be either a partial instantiation
       or gen_tmpl (in the case of a non-dependent call within a
       template definition).  */
    targ_ptr = (add_outermost_template_args
		(DECL_TI_ARGS (DECL_TEMPLATE_RESULT (tmpl)),
		 targ_ptr));

  /* It would be nice to avoid hashing here and then again in tsubst_decl,
     but it doesn't seem to be on the hot path.  */
  spec = retrieve_specialization (gen_tmpl, targ_ptr, 0);

  /* In checking builds, verify the cache is consistent between the
     immediate template and the most general template.  */
  gcc_checking_assert (tmpl == gen_tmpl
		       || ((fndecl
			    = retrieve_specialization (tmpl, orig_args, 0))
			   == spec)
		       || fndecl == NULL_TREE);

  if (spec != NULL_TREE)
    {
      if (FNDECL_HAS_ACCESS_ERRORS (spec))
	{
	  /* We deferred access errors under SFINAE; now that someone
	     wants diagnostics, redo the substitution to emit them.  */
	  if (complain & tf_error)
	    recheck_decl_substitution (spec, gen_tmpl, targ_ptr);
	  return error_mark_node;
	}
      return spec;
    }

  if (check_instantiated_args (gen_tmpl, INNERMOST_TEMPLATE_ARGS (targ_ptr),
			       complain))
    return error_mark_node;

  /* We are building a FUNCTION_DECL, during which the access of its
     parameters and return types have to be checked.  However this
     FUNCTION_DECL which is the desired context for access checking
     is not built yet.  We solve this chicken-and-egg problem by
     deferring all checks until we have the FUNCTION_DECL.  */
  push_deferring_access_checks (dk_deferred);

  /* Instantiation of the function happens in the context of the function
     template, not the context of the overload resolution we're doing.  */
  push_to_top_level ();
  /* If there are dependent arguments, e.g. because we're doing partial
     ordering, make sure processing_template_decl stays set.  */
  if (uses_template_parms (targ_ptr))
    ++processing_template_decl;
  if (DECL_CLASS_SCOPE_P (gen_tmpl))
    {
      /* Substitute into the enclosing class so member lookup during
	 substitution happens in the right context.  */
      tree ctx = tsubst_aggr_type (DECL_CONTEXT (gen_tmpl), targ_ptr,
				   complain, gen_tmpl, true);
      push_nested_class (ctx);
    }

  tree pattern = DECL_TEMPLATE_RESULT (gen_tmpl);

  fndecl = NULL_TREE;
  if (VAR_P (pattern))
    {
      /* We need to determine if we're using a partial or explicit
	 specialization now, because the type of the variable could be
	 different.  */
      tree tid = lookup_template_variable (gen_tmpl, targ_ptr);
      tree elt = most_specialized_partial_spec (tid, complain);
      if (elt == error_mark_node)
	pattern = error_mark_node;
      else if (elt)
	{
	  tree partial_tmpl = TREE_VALUE (elt);
	  tree partial_args = TREE_PURPOSE (elt);
	  tree partial_pat = DECL_TEMPLATE_RESULT (partial_tmpl);
	  fndecl = tsubst (partial_pat, partial_args, complain, gen_tmpl);
	}
    }

  /* Substitute template parameters to obtain the specialization.  */
  if (fndecl == NULL_TREE)
    fndecl = tsubst (pattern, targ_ptr, complain, gen_tmpl);
  if (DECL_CLASS_SCOPE_P (gen_tmpl))
    pop_nested_class ();
  pop_from_top_level ();

  if (fndecl == error_mark_node)
    {
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* The DECL_TI_TEMPLATE should always be the immediate parent template,
     not the most general template.  */
  DECL_TI_TEMPLATE (fndecl) = tmpl;
  DECL_TI_ARGS (fndecl) = targ_ptr;

  /* Now we know the specialization, compute access previously
     deferred.  Do no access control for inheriting constructors,
     as we already checked access for the inherited constructor.  */
  if (!(flag_new_inheriting_ctors
	&& DECL_INHERITED_CTOR (fndecl)))
    {
      push_access_scope (fndecl);
      if (!perform_deferred_access_checks (complain))
	access_ok = false;
      pop_access_scope (fndecl);
    }
  pop_deferring_access_checks ();

  /* If we've just instantiated the main entry point for a function,
     instantiate all the alternate entry points as well.  We do this
     by cloning the instantiation of the main entry point, not by
     instantiating the template clones.  */
  if (tree chain = DECL_CHAIN (gen_tmpl))
    if (DECL_P (chain) && DECL_CLONED_FUNCTION_P (chain))
      clone_cdtor (fndecl, /*update_methods=*/false);

  if (!access_ok)
    {
      if (!(complain & tf_error))
	{
	  /* Remember to reinstantiate when we're out of SFINAE so the
	     user can see the errors.  */
	  FNDECL_HAS_ACCESS_ERRORS (fndecl) = true;
	}
      return error_mark_node;
    }
  return fndecl;
}

/* Wrapper for instantiate_template_1; accounts the work to the
   TV_TEMPLATE_INST timevar.  */

tree
instantiate_template (tree tmpl, tree orig_args, tsubst_flags_t complain)
{
  tree ret;
  timevar_push (TV_TEMPLATE_INST);
  ret = instantiate_template_1 (tmpl, orig_args, complain);
  timevar_pop (TV_TEMPLATE_INST);
  return ret;
}

/* Instantiate the alias template TMPL with ARGS.  Also push a template
   instantiation level, which instantiate_template doesn't do because
   functions and variables have sufficient context established by the
   callers.  */

static tree
instantiate_alias_template (tree tmpl, tree args, tsubst_flags_t complain)
{
  if (tmpl == error_mark_node || args == error_mark_node)
    return error_mark_node;

  args = coerce_innermost_template_parms (DECL_TEMPLATE_PARMS (tmpl),
					  args, tmpl, complain,
					  /*require_all_args=*/true,
					  /*use_default_args=*/true);

  /* FIXME check for satisfaction in check_instantiated_args.  */
  if (flag_concepts
      && !any_dependent_template_arguments_p (args)
      && !constraints_satisfied_p (tmpl, args))
    {
      if (complain & tf_error)
	{
	  auto_diagnostic_group d;
	  error ("template constraint failure for %qD", tmpl);
	  diagnose_constraints (input_location, tmpl, args);
	}
      return error_mark_node;
    }

  if (!push_tinst_level (tmpl, args))
    return error_mark_node;
  tree r = instantiate_template (tmpl, args, complain);
  pop_tinst_level ();
  return r;
}

/* PARM is a template parameter pack for FN.  Returns true iff
   PARM is used in a deducible way in the argument list of FN.
*/

static bool
pack_deducible_p (tree parm, tree fn)
{
  tree t = FUNCTION_FIRST_USER_PARMTYPE (fn);
  for (; t; t = TREE_CHAIN (t))
    {
      tree type = TREE_VALUE (t);
      tree packs;
      if (!PACK_EXPANSION_P (type))
	continue;
      /* This function parameter is a pack expansion; see whether it
	 expands PARM.  */
      for (packs = PACK_EXPANSION_PARAMETER_PACKS (type);
	   packs; packs = TREE_CHAIN (packs))
	if (template_args_equal (TREE_VALUE (packs), parm))
	  {
	    /* The template parameter pack is used in a function parameter
	       pack.  If this is the end of the parameter list, the
	       template parameter pack is deducible.  */
	    if (TREE_CHAIN (t) == void_list_node)
	      return true;
	    else
	      /* Otherwise, not.  Well, it could be deduced from
		 a non-pack parameter, but doing so would end up with
		 a deduction mismatch, so don't bother.  */
	      return false;
	  }
    }
  /* The template parameter pack isn't used in any function parameter
     packs, but it might be used deeper, e.g. tuple<Args...>.  */
  return true;
}

/* Subroutine of fn_type_unification: check non-dependent parms for
   convertibility.  Returns 1 (and explains under EXPLAIN_P) if some
   argument cannot be converted to its non-deduced parameter type,
   0 otherwise.  CONVS, if non-null, receives the computed conversions.  */

static int
check_non_deducible_conversions (tree parms, const tree *args, unsigned nargs,
				 tree fn, unification_kind_t strict, int flags,
				 struct conversion **convs, bool explain_p)
{
  /* Non-constructor methods need to leave a conversion for 'this', which
     isn't included in nargs here.  */
  unsigned offset = (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
		     && !DECL_CONSTRUCTOR_P (fn));

  for (unsigned ia = 0;
       parms && parms != void_list_node && ia < nargs; )
    {
      tree parm = TREE_VALUE (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION
	  && (!TREE_CHAIN (parms)
	      || TREE_CHAIN (parms) == void_list_node))
	/* For a function parameter pack that occurs at the end of the
	   parameter-declaration-list, the type A of each remaining
	   argument of the call is compared with the type P of the
	   declarator-id of the function parameter pack.  */
	break;

      parms = TREE_CHAIN (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION)
	/* For a function parameter pack that does not occur at the end of
	   the parameter-declaration-list, the type of the parameter pack
	   is a non-deduced context.  */
	continue;

      if (!uses_template_parms (parm))
	{
	  /* PARM is fully non-dependent: the only check left is whether
	     the corresponding argument converts to it.  */
	  tree arg = args[ia];
	  conversion **conv_p = convs ? &convs[ia+offset] : NULL;
	  int lflags = conv_flags (ia, nargs, fn, arg, flags);

	  if (check_non_deducible_conversion (parm, arg, strict, lflags,
					      conv_p, explain_p))
	    return 1;
	}

      ++ia;
    }

  return 0;
}

/* The FN is a TEMPLATE_DECL for a function.  ARGS is an array with
   NARGS elements of the arguments that are being used when calling
   it.  TARGS is a vector into which the deduced template arguments
   are placed.

   Returns either a FUNCTION_DECL for the matching specialization of FN
   or NULL_TREE if no suitable specialization can be found.  If EXPLAIN_P
   is true, diagnostics will be printed to explain why it failed.

   If FN is a conversion operator, or we are trying to produce a specific
   specialization, RETURN_TYPE is the return type desired.

   The EXPLICIT_TARGS are explicit template arguments provided via a
   template-id.

   The parameter STRICT is one of:

   DEDUCE_CALL:
     We are deducing arguments for a function call, as in
     [temp.deduct.call].  If RETURN_TYPE is non-null, we are
     deducing arguments for a call to the result of a conversion
     function template, as in [over.call.object].

   DEDUCE_CONV:
     We are deducing arguments for a conversion function, as in
     [temp.deduct.conv].

   DEDUCE_EXACT:
     We are deducing arguments when doing an explicit instantiation
     as in [temp.explicit], when determining an explicit specialization
     as in [temp.expl.spec], or when taking the address of a function
     template, as in [temp.deduct.funcaddr].
*/ tree fn_type_unification (tree fn, tree explicit_targs, tree targs, const tree *args, unsigned int nargs, tree return_type, unification_kind_t strict, int flags, struct conversion **convs, bool explain_p, bool decltype_p) { tree parms; tree fntype; tree decl = NULL_TREE; tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none); bool ok; static int deduction_depth; /* type_unification_real will pass back any access checks from default template argument substitution. */ vec<deferred_access_check, va_gc> *checks = NULL; /* We don't have all the template args yet. */ bool incomplete = true; tree orig_fn = fn; if (flag_new_inheriting_ctors) fn = strip_inheriting_ctors (fn); tree tparms = DECL_INNERMOST_TEMPLATE_PARMS (fn); tree r = error_mark_node; tree full_targs = targs; if (TMPL_ARGS_DEPTH (targs) < TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (fn))) full_targs = (add_outermost_template_args (DECL_TI_ARGS (DECL_TEMPLATE_RESULT (fn)), targs)); if (decltype_p) complain |= tf_decltype; /* In C++0x, it's possible to have a function template whose type depends on itself recursively. This is most obvious with decltype, but can also occur with enumeration scope (c++/48969). So we need to catch infinite recursion and reject the substitution at deduction time; this function will return error_mark_node for any repeated substitution. This also catches excessive recursion such as when f<N> depends on f<N-1> across all integers, and returns error_mark_node for all the substitutions back up to the initial one. This is, of course, not reentrant. */ if (excessive_deduction_depth) return error_mark_node; ++deduction_depth; gcc_assert (TREE_CODE (fn) == TEMPLATE_DECL); fntype = TREE_TYPE (fn); if (explicit_targs) { /* [temp.deduct] The specified template arguments must match the template parameters in kind (i.e., type, nontype, template), and there must not be more arguments than there are parameters; otherwise type deduction fails. 
Nontype arguments must match the types of the corresponding nontype template parameters, or must be convertible to the types of the corresponding nontype parameters as specified in _temp.arg.nontype_, otherwise type deduction fails. All references in the function type of the function template to the corresponding template parameters are replaced by the specified template argument values. If a substitution in a template parameter or in the function type of the function template results in an invalid type, type deduction fails. */ int i, len = TREE_VEC_LENGTH (tparms); location_t loc = input_location; incomplete = false; if (explicit_targs == error_mark_node) goto fail; if (TMPL_ARGS_DEPTH (explicit_targs) < TMPL_ARGS_DEPTH (full_targs)) explicit_targs = add_outermost_template_args (full_targs, explicit_targs); /* Adjust any explicit template arguments before entering the substitution context. */ explicit_targs = (coerce_template_parms (tparms, explicit_targs, fn, complain|tf_partial, /*require_all_args=*/false, /*use_default_args=*/false)); if (explicit_targs == error_mark_node) goto fail; /* Substitute the explicit args into the function type. This is necessary so that, for instance, explicitly declared function arguments can match null pointed constants. If we were given an incomplete set of explicit args, we must not do semantic processing during substitution as we could create partial instantiations. */ for (i = 0; i < len; i++) { tree parm = TREE_VALUE (TREE_VEC_ELT (tparms, i)); bool parameter_pack = false; tree targ = TREE_VEC_ELT (explicit_targs, i); /* Dig out the actual parm. */ if (TREE_CODE (parm) == TYPE_DECL || TREE_CODE (parm) == TEMPLATE_DECL) { parm = TREE_TYPE (parm); parameter_pack = TEMPLATE_TYPE_PARAMETER_PACK (parm); } else if (TREE_CODE (parm) == PARM_DECL) { parm = DECL_INITIAL (parm); parameter_pack = TEMPLATE_PARM_PARAMETER_PACK (parm); } if (targ == NULL_TREE) /* No explicit argument for this template parameter. 
*/ incomplete = true; else if (parameter_pack && pack_deducible_p (parm, fn)) { /* Mark the argument pack as "incomplete". We could still deduce more arguments during unification. We remove this mark in type_unification_real. */ ARGUMENT_PACK_INCOMPLETE_P(targ) = 1; ARGUMENT_PACK_EXPLICIT_ARGS (targ) = ARGUMENT_PACK_ARGS (targ); /* We have some incomplete argument packs. */ incomplete = true; } } if (incomplete) { if (!push_tinst_level (fn, explicit_targs)) { excessive_deduction_depth = true; goto fail; } ++processing_template_decl; input_location = DECL_SOURCE_LOCATION (fn); /* Ignore any access checks; we'll see them again in instantiate_template and they might have the wrong access path at this point. */ push_deferring_access_checks (dk_deferred); tsubst_flags_t ecomplain = complain | tf_partial | tf_fndecl_type; fntype = tsubst (TREE_TYPE (fn), explicit_targs, ecomplain, NULL_TREE); pop_deferring_access_checks (); input_location = loc; --processing_template_decl; pop_tinst_level (); if (fntype == error_mark_node) goto fail; } /* Place the explicitly specified arguments in TARGS. */ explicit_targs = INNERMOST_TEMPLATE_ARGS (explicit_targs); for (i = NUM_TMPL_ARGS (explicit_targs); i--;) TREE_VEC_ELT (targs, i) = TREE_VEC_ELT (explicit_targs, i); if (!incomplete && CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs)) SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, NUM_TMPL_ARGS (explicit_targs)); } if (return_type && strict != DEDUCE_CALL) { tree *new_args = XALLOCAVEC (tree, nargs + 1); new_args[0] = return_type; memcpy (new_args + 1, args, nargs * sizeof (tree)); args = new_args; ++nargs; } if (!incomplete) goto deduced; /* Never do unification on the 'this' parameter. */ parms = skip_artificial_parms_for (fn, TYPE_ARG_TYPES (fntype)); if (return_type && strict == DEDUCE_CALL) { /* We're deducing for a call to the result of a template conversion function. The parms we really want are in return_type. 
*/ if (INDIRECT_TYPE_P (return_type)) return_type = TREE_TYPE (return_type); parms = TYPE_ARG_TYPES (return_type); } else if (return_type) { parms = tree_cons (NULL_TREE, TREE_TYPE (fntype), parms); } /* We allow incomplete unification without an error message here because the standard doesn't seem to explicitly prohibit it. Our callers must be ready to deal with unification failures in any event. */ /* If we aren't explaining yet, push tinst context so we can see where any errors (e.g. from class instantiations triggered by instantiation of default template arguments) come from. If we are explaining, this context is redundant. */ if (!explain_p && !push_tinst_level (fn, targs)) { excessive_deduction_depth = true; goto fail; } ok = !type_unification_real (DECL_INNERMOST_TEMPLATE_PARMS (fn), full_targs, parms, args, nargs, /*subr=*/0, strict, &checks, explain_p); if (!explain_p) pop_tinst_level (); if (!ok) goto fail; /* Now that we have bindings for all of the template arguments, ensure that the arguments deduced for the template template parameters have compatible template parameter lists. We cannot check this property before we have deduced all template arguments, because the template parameter types of a template template parameter might depend on prior template parameters deduced after the template template parameter. The following ill-formed example illustrates this issue: template<typename T, template<T> class C> void f(C<5>, T); template<int N> struct X {}; void g() { f(X<5>(), 5l); // error: template argument deduction fails } The template parameter list of 'C' depends on the template type parameter 'T', but 'C' is deduced to 'X' before 'T' is deduced to 'long'. Thus, we can't check that 'C' cannot bind to 'X' at the time that we deduce 'C'. 
*/ if (!template_template_parm_bindings_ok_p (DECL_INNERMOST_TEMPLATE_PARMS (fn), targs)) { unify_inconsistent_template_template_parameters (explain_p); goto fail; } deduced: /* CWG2369: Check satisfaction before non-deducible conversions. */ if (!constraints_satisfied_p (fn, targs)) { if (explain_p) diagnose_constraints (DECL_SOURCE_LOCATION (fn), fn, targs); goto fail; } /* DR 1391: All parameters have args, now check non-dependent parms for convertibility. We don't do this if all args were explicitly specified, as the standard says that we substitute explicit args immediately. */ if (incomplete && check_non_deducible_conversions (parms, args, nargs, fn, strict, flags, convs, explain_p)) goto fail; /* All is well so far. Now, check: [temp.deduct] When all template arguments have been deduced, all uses of template parameters in nondeduced contexts are replaced with the corresponding deduced argument values. If the substitution results in an invalid type, as described above, type deduction fails. */ if (!push_tinst_level (fn, targs)) { excessive_deduction_depth = true; goto fail; } /* Also collect access checks from the instantiation. */ reopen_deferring_access_checks (checks); decl = instantiate_template (fn, targs, complain); checks = get_deferred_access_checks (); pop_deferring_access_checks (); pop_tinst_level (); if (decl == error_mark_node) goto fail; /* Now perform any access checks encountered during substitution. */ push_access_scope (decl); ok = perform_access_checks (checks, complain); pop_access_scope (decl); if (!ok) goto fail; /* If we're looking for an exact match, check that what we got is indeed an exact match. It might not be if some template parameters are used in non-deduced contexts. But don't check for an exact match if we have dependent template arguments; in that case we're doing partial ordering, and we already know that we have two candidates that will provide the actual type. 
*/
  if (strict == DEDUCE_EXACT
      && !any_dependent_template_arguments_p (targs))
    {
      tree substed = TREE_TYPE (decl);
      unsigned int i;

      tree sarg
        = skip_artificial_parms_for (decl, TYPE_ARG_TYPES (substed));
      if (return_type)
        sarg = tree_cons (NULL_TREE, TREE_TYPE (substed), sarg);
      for (i = 0; i < nargs && sarg; ++i, sarg = TREE_CHAIN (sarg))
        if (!same_type_p (args[i], TREE_VALUE (sarg)))
          {
            unify_type_mismatch (explain_p, args[i], TREE_VALUE (sarg));
            goto fail;
          }
    }

  /* After doing deduction with the inherited constructor, actually return an
     instantiation of the inheriting constructor.  */
  if (orig_fn != fn)
    decl = instantiate_template (orig_fn, targs, complain);

  r = decl;

 fail:
  --deduction_depth;
  if (excessive_deduction_depth)
    {
      if (deduction_depth == 0)
        /* Reset once we're all the way out.  */
        excessive_deduction_depth = false;
    }

  return r;
}

/* Adjust types before performing type deduction, as described in
   [temp.deduct.call] and [temp.deduct.conv].  The rules in these two
   sections are symmetric.  PARM is the type of a function parameter
   or the return type of the conversion function.  ARG is the type of
   the argument passed to the call, or the type of the value
   initialized with the result of the conversion function.
   ARG_EXPR is the original argument expression, which may be null.

   Returns a set of UNIFY_ALLOW_* flags to OR into the strictness of
   the subsequent unification; may rewrite *PARM and *ARG in place.  */

static int
maybe_adjust_types_for_deduction (unification_kind_t strict,
                                  tree* parm,
                                  tree* arg,
                                  tree arg_expr)
{
  int result = 0;

  switch (strict)
    {
    case DEDUCE_CALL:
      break;

    case DEDUCE_CONV:
      /* Swap PARM and ARG throughout the remainder of this
         function; the handling is precisely symmetric since PARM
         will initialize ARG rather than vice versa.  */
      std::swap (parm, arg);
      break;

    case DEDUCE_EXACT:
      /* Core issue #873: Do the DR606 thing (see below) for these cases,
         too, but here handle it by stripping the reference from PARM
         rather than by adding it to ARG.  */
      if (TYPE_REF_P (*parm) && TYPE_REF_IS_RVALUE (*parm)
          && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM
          && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED
          && TYPE_REF_P (*arg) && !TYPE_REF_IS_RVALUE (*arg))
        *parm = TREE_TYPE (*parm);

      /* Nothing else to do in this case.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  if (!TYPE_REF_P (*parm))
    {
      /* [temp.deduct.call]

         If P is not a reference type:

         --If A is an array type, the pointer type produced by the
         array-to-pointer standard conversion (_conv.array_) is
         used in place of A for type deduction; otherwise,

         --If A is a function type, the pointer type produced by
         the function-to-pointer standard conversion
         (_conv.func_) is used in place of A for type deduction;
         otherwise,

         --If A is a cv-qualified type, the top level
         cv-qualifiers of A's type are ignored for type
         deduction.  */
      if (TREE_CODE (*arg) == ARRAY_TYPE)
        *arg = build_pointer_type (TREE_TYPE (*arg));
      else if (TREE_CODE (*arg) == FUNCTION_TYPE)
        *arg = build_pointer_type (*arg);
      else
        *arg = TYPE_MAIN_VARIANT (*arg);
    }

  /* [14.8.2.1/3 temp.deduct.call], "A forwarding reference is an rvalue
     reference to a cv-unqualified template parameter that does not represent
     a template parameter of a class template (during class template argument
     deduction (13.3.1.8)).  If P is a forwarding reference and the argument
     is an lvalue, the type "lvalue reference to A" is used in place of A for
     type deduction. */
  if (TYPE_REF_P (*parm)
      && TYPE_REF_IS_RVALUE (*parm)
      && TREE_CODE (TREE_TYPE (*parm)) == TEMPLATE_TYPE_PARM
      && !TEMPLATE_TYPE_PARM_FOR_CLASS (TREE_TYPE (*parm))
      && cp_type_quals (TREE_TYPE (*parm)) == TYPE_UNQUALIFIED
      && (arg_expr ? lvalue_p (arg_expr)
          /* try_one_overload doesn't provide an arg_expr, but
             functions are always lvalues.  */
          : TREE_CODE (*arg) == FUNCTION_TYPE))
    *arg = build_reference_type (*arg);

  /* [temp.deduct.call]

     If P is a cv-qualified type, the top level cv-qualifiers
     of P's type are ignored for type deduction.  If P is a
     reference type, the type referred to by P is used for
     type deduction.  */
  *parm = TYPE_MAIN_VARIANT (*parm);
  if (TYPE_REF_P (*parm))
    {
      *parm = TREE_TYPE (*parm);
      result |= UNIFY_ALLOW_OUTER_MORE_CV_QUAL;
    }

  /* DR 322. For conversion deduction, remove a reference type on parm
     too (which has been swapped into ARG by the DEDUCE_CONV case above).  */
  if (strict == DEDUCE_CONV && TYPE_REF_P (*arg))
    *arg = TREE_TYPE (*arg);

  return result;
}

/* Subroutine of fn_type_unification.  PARM is a function parameter of a
   template which doesn't contain any deducible template parameters; check if
   ARG is a suitable match for it.  STRICT, FLAGS and EXPLAIN_P are as in
   unify_one_argument.  Returns a unify_* status (0 on success).  */

static int
check_non_deducible_conversion (tree parm, tree arg, int strict,
                                int flags, struct conversion **conv_p,
                                bool explain_p)
{
  tree type;

  /* ARG may be an expression or already a type.  */
  if (!TYPE_P (arg))
    type = TREE_TYPE (arg);
  else
    type = arg;

  /* Identical types always match, for any deduction kind.  */
  if (same_type_p (parm, type))
    return unify_success (explain_p);

  tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none);
  if (strict == DEDUCE_CONV)
    {
      /* For conversion deduction the direction is reversed: the
         argument type must convert to the parameter.  */
      if (can_convert_arg (type, parm, NULL_TREE, flags, complain))
        return unify_success (explain_p);
    }
  else if (strict != DEDUCE_EXACT)
    {
      bool ok = false;
      tree conv_arg = TYPE_P (arg) ? NULL_TREE : arg;
      if (conv_p)
        /* Avoid recalculating this in add_function_candidate.  */
        ok = (*conv_p = good_conversion (parm, type, conv_arg,
                                         flags, complain));
      else
        ok = can_convert_arg (parm, type, conv_arg, flags, complain);
      if (ok)
        return unify_success (explain_p);
    }

  if (strict == DEDUCE_EXACT)
    return unify_type_mismatch (explain_p, parm, arg);
  else
    return unify_arg_conversion (explain_p, parm, type, arg);
}

static bool uses_deducible_template_parms (tree type);

/* Returns true iff the expression EXPR is one from which a template
   argument can be deduced.  In other words, if it's an undecorated
   use of a template non-type parameter.  */

static bool
deducible_expression (tree expr)
{
  /* Strip implicit conversions.  */
  while (CONVERT_EXPR_P (expr)
         || TREE_CODE (expr) == VIEW_CONVERT_EXPR)
    expr = TREE_OPERAND (expr, 0);
  return (TREE_CODE (expr) == TEMPLATE_PARM_INDEX);
}

/* Returns true iff the array domain DOMAIN uses a template parameter in a
   deducible way; that is, if it has a max value of <PARM> - 1.  */

static bool
deducible_array_bound (tree domain)
{
  if (domain == NULL_TREE)
    return false;

  tree max = TYPE_MAX_VALUE (domain);
  /* An array of bound N has a domain with max value N - 1, so a
     deducible bound appears here as a MINUS_EXPR whose first operand
     is the template parameter.  */
  if (TREE_CODE (max) != MINUS_EXPR)
    return false;

  return deducible_expression (TREE_OPERAND (max, 0));
}

/* Returns true iff the template arguments ARGS use a template parameter
   in a deducible way.  */

static bool
deducible_template_args (tree args)
{
  for (int i = 0; i < TREE_VEC_LENGTH (args); ++i)
    {
      bool deducible;
      tree elt = TREE_VEC_ELT (args, i);
      if (ARGUMENT_PACK_P (elt))
        deducible = deducible_template_args (ARGUMENT_PACK_ARGS (elt));
      else
        {
          if (PACK_EXPANSION_P (elt))
            elt = PACK_EXPANSION_PATTERN (elt);
          if (TREE_CODE (elt) == TEMPLATE_TEMPLATE_PARM)
            deducible = true;
          else if (TYPE_P (elt))
            deducible = uses_deducible_template_parms (elt);
          else
            deducible = deducible_expression (elt);
        }
      if (deducible)
        return true;
    }
  return false;
}

/* Returns true iff TYPE contains any deducible references to template
   parameters, as per 14.8.2.5.
*/

static bool
uses_deducible_template_parms (tree type)
{
  if (PACK_EXPANSION_P (type))
    type = PACK_EXPANSION_PATTERN (type);

  /* T
     cv-list T
     TT<T>
     TT<i>
     TT<> */
  if (TREE_CODE (type) == TEMPLATE_TYPE_PARM
      || TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    return true;

  /* T*
     T&
     T&& */
  if (INDIRECT_TYPE_P (type))
    return uses_deducible_template_parms (TREE_TYPE (type));

  /* T[integer-constant ]
     type [i] */
  if (TREE_CODE (type) == ARRAY_TYPE)
    return (uses_deducible_template_parms (TREE_TYPE (type))
            || deducible_array_bound (TYPE_DOMAIN (type)));

  /* T type ::*
     type T::*
     T T::*
     T (type ::*)()
     type (T::*)()
     type (type ::*)(T)
     type (T::*)(T)
     T (type ::*)(T)
     T (T::*)()
     T (T::*)(T)

     Both the class and the pointed-to type may mention a parameter.  */
  if (TYPE_PTRMEM_P (type))
    return (uses_deducible_template_parms (TYPE_PTRMEM_CLASS_TYPE (type))
            || (uses_deducible_template_parms
                (TYPE_PTRMEM_POINTED_TO_TYPE (type))));

  /* template-name <T> (where template-name refers to a class template)
     template-name <i> (where template-name refers to a class template) */
  if (CLASS_TYPE_P (type)
      && CLASSTYPE_TEMPLATE_INFO (type)
      && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (type)))
    return deducible_template_args (INNERMOST_TEMPLATE_ARGS
                                    (CLASSTYPE_TI_ARGS (type)));

  /* type (T)
     T()
     T(T)

     Check the return type, then each parameter type (skipping the
     implicit object parameter of a METHOD_TYPE).  */
  if (FUNC_OR_METHOD_TYPE_P (type))
    {
      if (uses_deducible_template_parms (TREE_TYPE (type)))
        return true;
      tree parm = TYPE_ARG_TYPES (type);
      if (TREE_CODE (type) == METHOD_TYPE)
        parm = TREE_CHAIN (parm);
      for (; parm; parm = TREE_CHAIN (parm))
        if (uses_deducible_template_parms (TREE_VALUE (parm)))
          return true;
    }

  return false;
}

/* Subroutine of type_unification_real and unify_pack_expansion to
   handle unification of a single P/A pair.  Parameters are as
   for those functions.
*/

static int
unify_one_argument (tree tparms, tree targs, tree parm, tree arg,
                    int subr, unification_kind_t strict,
                    bool explain_p)
{
  tree arg_expr = NULL_TREE;
  int arg_strict;

  if (arg == error_mark_node || parm == error_mark_node)
    return unify_invalid (explain_p);
  if (arg == unknown_type_node)
    /* We can't deduce anything from this, but we might get all the
       template args from other function args.  */
    return unify_success (explain_p);

  /* Implicit conversions (Clause 4) will be performed on a function
     argument to convert it to the type of the corresponding function
     parameter if the parameter type contains no template-parameters that
     participate in template argument deduction.  */
  if (strict != DEDUCE_EXACT
      && TYPE_P (parm) && !uses_deducible_template_parms (parm))
    /* For function parameters with no deducible template parameters,
       just return.  We'll check non-dependent conversions later.  */
    return unify_success (explain_p);

  /* Map the deduction kind onto the UNIFY_ALLOW_* strictness flags.  */
  switch (strict)
    {
    case DEDUCE_CALL:
      arg_strict = (UNIFY_ALLOW_OUTER_LEVEL
                    | UNIFY_ALLOW_MORE_CV_QUAL
                    | UNIFY_ALLOW_DERIVED);
      break;

    case DEDUCE_CONV:
      arg_strict = UNIFY_ALLOW_LESS_CV_QUAL;
      break;

    case DEDUCE_EXACT:
      arg_strict = UNIFY_ALLOW_NONE;
      break;

    default:
      gcc_unreachable ();
    }

  /* We only do these transformations if this is the top-level
     parameter_type_list in a call or declaration matching; in other
     situations (nested function declarators, template argument lists) we
     won't be comparing a type to an expression, and we don't do any type
     adjustments.  */
  if (!subr)
    {
      if (!TYPE_P (arg))
        {
          gcc_assert (TREE_TYPE (arg) != NULL_TREE);
          if (type_unknown_p (arg))
            {
              /* [temp.deduct.type] A template-argument can be
                 deduced from a pointer to function or pointer
                 to member function argument if the set of
                 overloaded functions does not contain function
                 templates and at most one of a set of
                 overloaded functions provides a unique
                 match.  */
              resolve_overloaded_unification (tparms, targs, parm,
                                              arg, strict,
                                              arg_strict, explain_p);
              /* If a unique match was not found, this is a
                 non-deduced context, so we still succeed.  */
              return unify_success (explain_p);
            }

          arg_expr = arg;
          arg = unlowered_expr_type (arg);
          if (arg == error_mark_node)
            return unify_invalid (explain_p);
        }

      arg_strict |=
        maybe_adjust_types_for_deduction (strict, &parm, &arg, arg_expr);
    }
  else
    if ((TYPE_P (parm) || TREE_CODE (parm) == TEMPLATE_DECL)
        != (TYPE_P (arg) || TREE_CODE (arg) == TEMPLATE_DECL))
      return unify_template_argument_mismatch (explain_p, parm, arg);

  /* For deduction from an init-list we need the actual list.  */
  if (arg_expr && BRACE_ENCLOSED_INITIALIZER_P (arg_expr))
    arg = arg_expr;
  return unify (tparms, targs, parm, arg, arg_strict, explain_p);
}

/* for_each_template_parm callback that always returns 0.  Used when we
   only care about the any_fn callback (see array_deduction_r).  */

static int
zero_r (tree, void *)
{
  return 0;
}

/* for_each_template_parm any_fn callback to handle deduction of a template
   type argument from the type of an array bound.  DATA carries the
   tparms/targs pair being deduced.  */

static int
array_deduction_r (tree t, void *data)
{
  tree_pair_p d = (tree_pair_p)data;
  tree &tparms = d->purpose;
  tree &targs = d->value;

  if (TREE_CODE (t) == ARRAY_TYPE)
    if (tree dom = TYPE_DOMAIN (t))
      if (tree max = TYPE_MAX_VALUE (dom))
        {
          if (TREE_CODE (max) == MINUS_EXPR)
            max = TREE_OPERAND (max, 0);
          if (TREE_CODE (max) == TEMPLATE_PARM_INDEX)
            unify (tparms, targs, TREE_TYPE (max), size_type_node,
                   UNIFY_ALLOW_NONE, /*explain*/false);
        }

  /* Keep walking.  */
  return 0;
}

/* Try to deduce any not-yet-deduced template type arguments from the type of
   an array bound.  This is handled separately from unify because 14.8.2.5 says
   "The type of a type parameter is only deduced from an array bound if it is
   not otherwise deduced."
*/

static void
try_array_deduction (tree tparms, tree targs, tree parm)
{
  tree_pair_s data = { tparms, targs };
  hash_set<tree> visited;
  for_each_template_parm (parm, zero_r, &data, &visited,
                          /*nondeduced*/false, array_deduction_r);
}

/* Most parms like fn_type_unification.

   If SUBR is 1, we're being called recursively (to unify the
   arguments of a function or method parameter of a function
   template).

   CHECKS is a pointer to a vector of access checks encountered while
   substituting default template arguments.

   Returns 0 on success, nonzero (a unify_* failure status) otherwise.  */

static int
type_unification_real (tree tparms,
                       tree full_targs,
                       tree xparms,
                       const tree *xargs,
                       unsigned int xnargs,
                       int subr,
                       unification_kind_t strict,
                       vec<deferred_access_check, va_gc> **checks,
                       bool explain_p)
{
  tree parm, arg;
  int i;
  int ntparms = TREE_VEC_LENGTH (tparms);
  int saw_undeduced = 0;
  tree parms;
  const tree *args;
  unsigned int nargs;
  unsigned int ia;

  gcc_assert (TREE_CODE (tparms) == TREE_VEC);
  gcc_assert (xparms == NULL_TREE || TREE_CODE (xparms) == TREE_LIST);
  gcc_assert (ntparms > 0);

  tree targs = INNERMOST_TEMPLATE_ARGS (full_targs);

  /* Reset the number of non-defaulted template arguments contained
     in TARGS.  */
  NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs) = NULL_TREE;

  /* We may iterate a second time to resolve parameters whose deduction
     depended on others (see saw_undeduced below).  */
 again:
  parms = xparms;
  args = xargs;
  nargs = xnargs;

  ia = 0;
  while (parms && parms != void_list_node
         && ia < nargs)
    {
      parm = TREE_VALUE (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION
          && (!TREE_CHAIN (parms) || TREE_CHAIN (parms) == void_list_node))
        /* For a function parameter pack that occurs at the end of the
           parameter-declaration-list, the type A of each remaining
           argument of the call is compared with the type P of the
           declarator-id of the function parameter pack.  */
        break;

      parms = TREE_CHAIN (parms);

      if (TREE_CODE (parm) == TYPE_PACK_EXPANSION)
        /* For a function parameter pack that does not occur at the
           end of the parameter-declaration-list, the type of the
           parameter pack is a non-deduced context.  */
        continue;

      arg = args[ia];
      ++ia;

      if (unify_one_argument (tparms, full_targs, parm, arg, subr, strict,
                              explain_p))
        return 1;
    }

  if (parms
      && parms != void_list_node
      && TREE_CODE (TREE_VALUE (parms)) == TYPE_PACK_EXPANSION)
    {
      /* Unify the remaining arguments with the pack expansion type.  */
      tree argvec;
      tree parmvec = make_tree_vec (1);

      /* Allocate a TREE_VEC and copy in all of the arguments */
      argvec = make_tree_vec (nargs - ia);
      for (i = 0; ia < nargs; ++ia, ++i)
        TREE_VEC_ELT (argvec, i) = args[ia];

      /* Copy the parameter into parmvec.  */
      TREE_VEC_ELT (parmvec, 0) = TREE_VALUE (parms);
      if (unify_pack_expansion (tparms, full_targs, parmvec, argvec, strict,
                                /*subr=*/subr, explain_p))
        return 1;

      /* Advance to the end of the list of parameters.  */
      parms = TREE_CHAIN (parms);
    }

  /* Fail if we've reached the end of the parm list, and more args
     are present, and the parm list isn't variadic.  */
  if (ia < nargs && parms == void_list_node)
    return unify_too_many_arguments (explain_p, nargs, ia);
  /* Fail if parms are left and they don't have default values and
     they aren't all deduced as empty packs (c++/57397).  This is
     consistent with sufficient_parms_p.  */
  if (parms && parms != void_list_node
      && TREE_PURPOSE (parms) == NULL_TREE)
    {
      unsigned int count = nargs;
      tree p = parms;
      bool type_pack_p;
      do
        {
          type_pack_p = TREE_CODE (TREE_VALUE (p)) == TYPE_PACK_EXPANSION;
          if (!type_pack_p)
            count++;
          p = TREE_CHAIN (p);
        }
      while (p && p != void_list_node);
      if (count != nargs)
        return unify_too_few_arguments (explain_p, ia, count,
                                        type_pack_p);
    }

  if (!subr)
    {
      tsubst_flags_t complain = (explain_p
                                 ? tf_warning_or_error
                                 : tf_none);
      bool tried_array_deduction = (cxx_dialect < cxx17);

      for (i = 0; i < ntparms; i++)
        {
          tree targ = TREE_VEC_ELT (targs, i);
          tree tparm = TREE_VEC_ELT (tparms, i);

          /* Clear the "incomplete" flags on all argument packs now so that
             substituting them into later default arguments works.  */
          if (targ && ARGUMENT_PACK_P (targ))
            {
              ARGUMENT_PACK_INCOMPLETE_P (targ) = 0;
              ARGUMENT_PACK_EXPLICIT_ARGS (targ) = NULL_TREE;
            }

          if (targ || tparm == error_mark_node)
            continue;
          tparm = TREE_VALUE (tparm);

          if (TREE_CODE (tparm) == TYPE_DECL
              && !tried_array_deduction)
            {
              try_array_deduction (tparms, targs, xparms);
              tried_array_deduction = true;
              if (TREE_VEC_ELT (targs, i))
                continue;
            }

          /* If this is an undeduced nontype parameter that depends on
             a type parameter, try another pass; its type may have been
             deduced from a later argument than the one from which
             this parameter can be deduced.  */
          if (TREE_CODE (tparm) == PARM_DECL
              && uses_template_parms (TREE_TYPE (tparm))
              && saw_undeduced < 2)
            {
              saw_undeduced = 1;
              continue;
            }

          /* Core issue #226 (C++0x) [temp.deduct]:

             If a template argument has not been deduced, its
             default template argument, if any, is used.

             When we are in C++98 mode, TREE_PURPOSE will either
             be NULL_TREE or ERROR_MARK_NODE, so we do not need
             to explicitly check cxx_dialect here.  */
          if (TREE_PURPOSE (TREE_VEC_ELT (tparms, i)))
            /* OK, there is a default argument.  Wait until after the
               conversion check to do substitution.  */
            continue;

          /* If the type parameter is a parameter pack, then it will
             be deduced to an empty parameter pack.  */
          if (template_parameter_pack_p (tparm))
            {
              tree arg;

              if (TREE_CODE (tparm) == TEMPLATE_PARM_INDEX)
                {
                  arg = make_node (NONTYPE_ARGUMENT_PACK);
                  TREE_CONSTANT (arg) = 1;
                }
              else
                arg = cxx_make_type (TYPE_ARGUMENT_PACK);

              SET_ARGUMENT_PACK_ARGS (arg, make_tree_vec (0));

              TREE_VEC_ELT (targs, i) = arg;
              continue;
            }

          return unify_parameter_deduction_failure (explain_p, tparm);
        }

      /* Now substitute into the default template arguments.  */
      for (i = 0; i < ntparms; i++)
        {
          tree targ = TREE_VEC_ELT (targs, i);
          tree tparm = TREE_VEC_ELT (tparms, i);

          if (targ || tparm == error_mark_node)
            continue;
          tree parm = TREE_VALUE (tparm);
          tree arg = TREE_PURPOSE (tparm);
          reopen_deferring_access_checks (*checks);
          location_t save_loc = input_location;
          if (DECL_P (parm))
            input_location = DECL_SOURCE_LOCATION (parm);

          if (saw_undeduced == 1
              && TREE_CODE (parm) == PARM_DECL
              && uses_template_parms (TREE_TYPE (parm)))
            {
              /* The type of this non-type parameter depends on undeduced
                 parameters.  Don't try to use its default argument yet,
                 since we might deduce an argument for it on the next pass,
                 but do check whether the arguments we already have cause
                 substitution failure, so that that happens before we try
                 later default arguments (78489).  */
              ++processing_template_decl;
              tree type = tsubst (TREE_TYPE (parm), full_targs, complain,
                                  NULL_TREE);
              --processing_template_decl;
              if (type == error_mark_node)
                arg = error_mark_node;
              else
                arg = NULL_TREE;
            }
          else
            {
              /* Even if the call is happening in template context, getting
                 here means it's non-dependent, and a default argument is
                 considered a separate definition under [temp.decls], so we can
                 do this substitution without processing_template_decl.  This
                 is important if the default argument contains something that
                 might be instantiation-dependent like access (87480).  */
              processing_template_decl_sentinel s;
              tree substed = NULL_TREE;
              if (saw_undeduced == 1)
                {
                  /* First instantiate in template context, in case we still
                     depend on undeduced template parameters.  */
                  ++processing_template_decl;
                  substed = tsubst_template_arg (arg, full_targs, complain,
                                                 NULL_TREE);
                  --processing_template_decl;
                  if (substed != error_mark_node
                      && !uses_template_parms (substed))
                    /* We replaced all the tparms, substitute again out of
                       template context.  */
                    substed = NULL_TREE;
                }
              if (!substed)
                substed = tsubst_template_arg (arg, full_targs, complain,
                                               NULL_TREE);

              if (!uses_template_parms (substed))
                arg = convert_template_argument (parm, substed, full_targs,
                                                 complain, i, NULL_TREE);
              else if (saw_undeduced == 1)
                arg = NULL_TREE;
              else
                arg = error_mark_node;
            }

          input_location = save_loc;
          *checks = get_deferred_access_checks ();
          pop_deferring_access_checks ();

          if (arg == error_mark_node)
            return 1;
          else if (arg)
            {
              TREE_VEC_ELT (targs, i) = arg;
              /* The position of the first default template argument,
                 is also the number of non-defaulted arguments in TARGS.
                 Record that.  */
              if (!NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs))
                SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, i);
            }
        }

      if (saw_undeduced++ == 1)
        goto again;
    }

  if (CHECKING_P && !NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs))
    SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (targs, TREE_VEC_LENGTH (targs));

  return unify_success (explain_p);
}

/* Subroutine of type_unification_real.  Args are like the variables
   at the call site.  ARG is an overloaded function (or template-id);
   we try deducing template args from each of the overloads, and if
   only one succeeds, we go with that.  Modifies TARGS and returns
   true on success.  */

static bool
resolve_overloaded_unification (tree tparms,
                                tree targs,
                                tree parm,
                                tree arg,
                                unification_kind_t strict,
                                int sub_strict,
                                bool explain_p)
{
  tree tempargs = copy_node (targs);
  int good = 0;
  tree goodfn = NULL_TREE;
  bool addr_p;

  if (TREE_CODE (arg) == ADDR_EXPR)
    {
      arg = TREE_OPERAND (arg, 0);
      addr_p = true;
    }
  else
    addr_p = false;

  if (TREE_CODE (arg) == COMPONENT_REF)
    /* Handle `&x' where `x' is some static or non-static member
       function name.  */
    arg = TREE_OPERAND (arg, 1);

  if (TREE_CODE (arg) == OFFSET_REF)
    arg = TREE_OPERAND (arg, 1);

  /* Strip baselink information.  */
  if (BASELINK_P (arg))
    arg = BASELINK_FUNCTIONS (arg);

  if (TREE_CODE (arg) == TEMPLATE_ID_EXPR)
    {
      /* If we got some explicit template args, we need to plug them into
         the affected templates before we try to unify, in case the
         explicit args will completely resolve the templates in question.  */

      int ok = 0;
      tree expl_subargs = TREE_OPERAND (arg, 1);
      arg = TREE_OPERAND (arg, 0);

      for (lkp_iterator iter (arg); iter; ++iter)
        {
          tree fn = *iter;
          tree subargs, elem;

          if (TREE_CODE (fn) != TEMPLATE_DECL)
            continue;

          subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn),
                                           expl_subargs, NULL_TREE, tf_none,
                                           /*require_all_args=*/true,
                                           /*use_default_args=*/true);
          if (subargs != error_mark_node
              && !any_dependent_template_arguments_p (subargs))
            {
              fn = instantiate_template (fn, subargs, tf_none);
              if (!constraints_satisfied_p (fn))
                continue;
              if (undeduced_auto_decl (fn))
                {
                  /* Instantiate the function to deduce its return type.  */
                  ++function_depth;
                  instantiate_decl (fn, /*defer*/false, /*class*/false);
                  --function_depth;
                }

              elem = TREE_TYPE (fn);
              if (try_one_overload (tparms, targs, tempargs, parm,
                                    elem, strict, sub_strict, addr_p, explain_p)
                  && (!goodfn || !same_type_p (goodfn, elem)))
                {
                  goodfn = elem;
                  ++good;
                }
            }
          else if (subargs)
            ++ok;
        }
      /* If no templates (or more than one) are fully resolved by the
         explicit arguments, this template-id is a non-deduced context; it
         could still be OK if we deduce all template arguments for the
         enclosing call through other arguments.  */
      if (good != 1)
        good = ok;
    }
  else if (!OVL_P (arg))
    /* If ARG is, for example, "(0, &f)" then its type will be unknown
       -- but the deduction does not succeed because the expression is
       not just the function on its own.  */
    return false;
  else
    for (lkp_iterator iter (arg); iter; ++iter)
      {
        tree fn = *iter;
        if (try_one_overload (tparms, targs, tempargs, parm, TREE_TYPE (fn),
                              strict, sub_strict, addr_p, explain_p)
            && (!goodfn || !decls_match (goodfn, fn)))
          {
            goodfn = fn;
            ++good;
          }
      }

  /* [temp.deduct.type] A template-argument can be deduced from a pointer
     to function or pointer to member function argument if the set of
     overloaded functions does not contain function templates and at most
     one of a set of overloaded functions provides a unique match.

     So if we found multiple possibilities, we return success but don't
     deduce anything.  */

  if (good == 1)
    {
      int i = TREE_VEC_LENGTH (targs);
      for (; i--; )
        if (TREE_VEC_ELT (tempargs, i))
          {
            tree old = TREE_VEC_ELT (targs, i);
            tree new_ = TREE_VEC_ELT (tempargs, i);
            if (new_ && old && ARGUMENT_PACK_P (old)
                && ARGUMENT_PACK_EXPLICIT_ARGS (old))
              /* Don't forget explicit template arguments in a pack.  */
              ARGUMENT_PACK_EXPLICIT_ARGS (new_)
                = ARGUMENT_PACK_EXPLICIT_ARGS (old);
            TREE_VEC_ELT (targs, i) = new_;
          }
    }
  if (good)
    return true;

  return false;
}

/* Core DR 115: In contexts where deduction is done and fails, or in
   contexts where deduction is not done, if a template argument list is
   specified and it, along with any default template arguments, identifies
   a single function template specialization, then the template-id is an
   lvalue for the function template specialization.
*/

tree
resolve_nondeduced_context (tree orig_expr, tsubst_flags_t complain)
{
  tree expr, offset, baselink;
  bool addr;

  if (!type_unknown_p (orig_expr))
    return orig_expr;

  expr = orig_expr;
  addr = false;
  offset = NULL_TREE;
  baselink = NULL_TREE;

  /* Peel off &, offset-ref and baselink wrappers, remembering each so
     they can be rebuilt around the resolved function below.  */
  if (TREE_CODE (expr) == ADDR_EXPR)
    {
      expr = TREE_OPERAND (expr, 0);
      addr = true;
    }
  if (TREE_CODE (expr) == OFFSET_REF)
    {
      offset = expr;
      expr = TREE_OPERAND (expr, 1);
    }
  if (BASELINK_P (expr))
    {
      baselink = expr;
      expr = BASELINK_FUNCTIONS (expr);
    }

  if (TREE_CODE (expr) == TEMPLATE_ID_EXPR)
    {
      int good = 0;
      tree goodfn = NULL_TREE;

      /* If we got some explicit template args, we need to plug them into
         the affected templates before we try to unify, in case the
         explicit args will completely resolve the templates in question.  */

      tree expl_subargs = TREE_OPERAND (expr, 1);
      tree arg = TREE_OPERAND (expr, 0);
      tree badfn = NULL_TREE;
      tree badargs = NULL_TREE;

      for (lkp_iterator iter (arg); iter; ++iter)
        {
          tree fn = *iter;
          tree subargs, elem;

          if (TREE_CODE (fn) != TEMPLATE_DECL)
            continue;

          subargs = coerce_template_parms (DECL_INNERMOST_TEMPLATE_PARMS (fn),
                                           expl_subargs, NULL_TREE, tf_none,
                                           /*require_all_args=*/true,
                                           /*use_default_args=*/true);
          if (subargs != error_mark_node
              && !any_dependent_template_arguments_p (subargs))
            {
              elem = instantiate_template (fn, subargs, tf_none);
              if (elem == error_mark_node)
                {
                  badfn = fn;
                  badargs = subargs;
                }
              else if (elem && (!goodfn || !decls_match (goodfn, elem))
                       && constraints_satisfied_p (elem))
                {
                  goodfn = elem;
                  ++good;
                }
            }
        }
      if (good == 1)
        {
          mark_used (goodfn);
          expr = goodfn;
          if (baselink)
            expr = build_baselink (BASELINK_BINFO (baselink),
                                   BASELINK_ACCESS_BINFO (baselink),
                                   expr, BASELINK_OPTYPE (baselink));
          if (offset)
            {
              tree base
                = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (offset, 0)));
              expr = build_offset_ref (base, expr, addr, complain);
            }
          if (addr)
            expr = cp_build_addr_expr (expr, complain);
          return expr;
        }
      else if (good == 0 && badargs && (complain & tf_error))
        /* There were no good options and at least one bad one, so let the
           user know what the problem is.  */
        instantiate_template (badfn, badargs, complain);
    }
  return orig_expr;
}

/* As above, but error out if the expression remains overloaded.  */

tree
resolve_nondeduced_context_or_error (tree exp, tsubst_flags_t complain)
{
  exp = resolve_nondeduced_context (exp, complain);
  if (type_unknown_p (exp))
    {
      if (complain & tf_error)
        cxx_incomplete_type_error (exp, TREE_TYPE (exp));
      return error_mark_node;
    }
  return exp;
}

/* Subroutine of resolve_overloaded_unification; does deduction for a single
   overload.  Fills TARGS with any deduced arguments, or error_mark_node if
   different overloads deduce different arguments for a given parm.
   ADDR_P is true if the expression for which deduction is being performed
   was of the form "& fn" rather than simply "fn".

   Returns 1 on success.  */

static int
try_one_overload (tree tparms,
                  tree orig_targs,
                  tree targs,
                  tree parm,
                  tree arg,
                  unification_kind_t strict,
                  int sub_strict,
                  bool addr_p,
                  bool explain_p)
{
  int nargs;
  tree tempargs;
  int i;

  if (arg == error_mark_node)
    return 0;

  /* [temp.deduct.type] A template-argument can be deduced from a pointer
     to function or pointer to member function argument if the set of
     overloaded functions does not contain function templates and at most
     one of a set of overloaded functions provides a unique match.

     So if this is a template, just return success.  */

  if (uses_template_parms (arg))
    return 1;

  if (TREE_CODE (arg) == METHOD_TYPE)
    arg = build_ptrmemfunc_type (build_pointer_type (arg));
  else if (addr_p)
    arg = build_pointer_type (arg);

  sub_strict |= maybe_adjust_types_for_deduction (strict, &parm, &arg, NULL);

  /* We don't copy orig_targs for this because if we have already deduced
     some template args from previous args, unify would complain when we
     try to deduce a template parameter for the same argument, even though
     there isn't really a conflict.  */
  nargs = TREE_VEC_LENGTH (targs);
  tempargs = make_tree_vec (nargs);

  if (unify (tparms, tempargs, parm, arg, sub_strict, explain_p))
    return 0;

  /* First make sure we didn't deduce anything that conflicts with
     explicitly specified args.  */
  for (i = nargs; i--; )
    {
      tree elt = TREE_VEC_ELT (tempargs, i);
      tree oldelt = TREE_VEC_ELT (orig_targs, i);

      if (!elt)
        /*NOP*/;
      else if (uses_template_parms (elt))
        /* Since we're unifying against ourselves, we will fill in
           template args used in the function parm list with our own
           template parms.  Discard them.  */
        TREE_VEC_ELT (tempargs, i) = NULL_TREE;
      else if (oldelt && ARGUMENT_PACK_P (oldelt))
        {
          /* Check that the argument at each index of the deduced argument pack
             is equivalent to the corresponding explicitly specified argument.
             We may have deduced more arguments than were explicitly specified,
             and that's OK.  */

          /* We used to assert ARGUMENT_PACK_INCOMPLETE_P (oldelt) here, but
             that's wrong if we deduce the same argument pack from multiple
             function arguments: it's only incomplete the first time.  */

          tree explicit_pack = ARGUMENT_PACK_ARGS (oldelt);
          tree deduced_pack = ARGUMENT_PACK_ARGS (elt);

          if (TREE_VEC_LENGTH (deduced_pack)
              < TREE_VEC_LENGTH (explicit_pack))
            return 0;

          for (int j = 0; j < TREE_VEC_LENGTH (explicit_pack); j++)
            if (!template_args_equal (TREE_VEC_ELT (explicit_pack, j),
                                      TREE_VEC_ELT (deduced_pack, j)))
              return 0;
        }
      else if (oldelt && !template_args_equal (oldelt, elt))
        return 0;
    }

  /* No conflicts; commit the deduced arguments into TARGS.  */
  for (i = nargs; i--; )
    {
      tree elt = TREE_VEC_ELT (tempargs, i);

      if (elt)
        TREE_VEC_ELT (targs, i) = elt;
    }

  return 1;
}

/* PARM is a template class (perhaps with unbound template
   parameters).  ARG is a fully instantiated type.  If ARG can be
   bound to PARM, return ARG, otherwise return NULL_TREE.  TPARMS and
   TARGS are as for unify.
*/

static tree
try_class_unification (tree tparms, tree targs, tree parm, tree arg,
		       bool explain_p)
{
  tree copy_of_targs;

  if (!CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg))
    return NULL_TREE;
  else if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    /* Matches anything.  */;
  else if (most_general_template (CLASSTYPE_TI_TEMPLATE (arg))
	   != most_general_template (CLASSTYPE_TI_TEMPLATE (parm)))
    return NULL_TREE;

  /* We need to make a new template argument vector for the call to
     unify.  If we used TARGS, we'd clutter it up with the result of
     the attempted unification, even if this class didn't work out.
     We also don't want to commit ourselves to all the unifications
     we've already done, since unification is supposed to be done on
     an argument-by-argument basis.  In other words, consider the
     following pathological case:

       template <int I, int J, int K>
       struct S {};

       template <int I, int J>
       struct S<I, J, 2> : public S<I, I, I>, S<J, J, J> {};

       template <int I, int J, int K>
       void f(S<I, J, K>, S<I, I, I>);

       void g() {
	 S<0, 0, 0> s0;
	 S<0, 1, 2> s2;

	 f(s0, s2);
       }

     Now, by the time we consider the unification involving `s2', we
     already know that we must have `f<0, 0, 0>'.  But, even though
     `S<0, 1, 2>' is derived from `S<0, 0, 0>', the code is invalid
     because there are two ways to unify base classes of S<0, 1, 2>
     with S<I, I, I>.  If we kept the already deduced knowledge, we
     would reject the possibility I=1.  */
  copy_of_targs = make_tree_vec (TREE_VEC_LENGTH (targs));

  if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    {
      /* Scratch vector keeps TARGS unpolluted on failure.  */
      if (unify_bound_ttp_args (tparms, copy_of_targs, parm, arg, explain_p))
	return NULL_TREE;

      return arg;
    }

  /* If unification failed, we're done.  */
  if (unify (tparms, copy_of_targs, CLASSTYPE_TI_ARGS (parm),
	     CLASSTYPE_TI_ARGS (arg), UNIFY_ALLOW_NONE, explain_p))
    return NULL_TREE;

  return arg;
}

/* Given a template type PARM and a class type ARG, find the unique base
   type in ARG that is an instance of PARM.  We do not examine ARG itself;
   only its base-classes.
   If there is not exactly one appropriate base class, return NULL_TREE.
   PARM may be the type of a partial specialization, as well as a plain
   template type.  Used by unify.  */

static enum template_base_result
get_template_base (tree tparms, tree targs, tree parm, tree arg,
		   bool explain_p, tree *result)
{
  tree rval = NULL_TREE;
  tree binfo;

  gcc_assert (RECORD_OR_UNION_CODE_P (TREE_CODE (arg)));

  binfo = TYPE_BINFO (complete_type (arg));
  if (!binfo)
    {
      /* The type could not be completed.  */
      *result = NULL_TREE;
      return tbr_incomplete_type;
    }

  /* Walk in inheritance graph order.  The search order is not
     important, and this avoids multiple walks of virtual bases.  */
  for (binfo = TREE_CHAIN (binfo); binfo; binfo = TREE_CHAIN (binfo))
    {
      tree r = try_class_unification (tparms, targs, parm,
				      BINFO_TYPE (binfo), explain_p);

      if (r)
	{
	  /* If there is more than one satisfactory baseclass, then:

	       [temp.deduct.call]

	      If they yield more than one possible deduced A, the type
	      deduction fails.

	     applies.  */
	  if (rval && !same_type_p (r, rval))
	    {
	      *result = NULL_TREE;
	      return tbr_ambiguous_baseclass;
	    }

	  rval = r;
	}
    }

  *result = rval;
  return tbr_success;
}

/* Returns the level of DECL, which declares a template parameter.  */

static int
template_decl_level (tree decl)
{
  switch (TREE_CODE (decl))
    {
    case TYPE_DECL:
    case TEMPLATE_DECL:
      return TEMPLATE_TYPE_LEVEL (TREE_TYPE (decl));

    case PARM_DECL:
      return TEMPLATE_PARM_LEVEL (DECL_INITIAL (decl));

    default:
      gcc_unreachable ();
    }
  /* Not reached; all cases return or abort above.  */
  return 0;
}

/* Decide whether ARG can be unified with PARM, considering only the
   cv-qualifiers of each type, given STRICT as documented for unify.
   Returns nonzero iff the unification is OK on that basis.
*/

static int
check_cv_quals_for_unify (int strict, tree arg, tree parm)
{
  int arg_quals = cp_type_quals (arg);
  int parm_quals = cp_type_quals (parm);

  if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
      && !(strict & UNIFY_ALLOW_OUTER_MORE_CV_QUAL))
    {
      /*  Although a CVR qualifier is ignored when being applied to a
	  substituted template parameter ([8.3.2]/1 for example), that
	  does not allow us to unify "const T" with "int&" because both
	  types are not of the form "cv-list T" [14.8.2.5 temp.deduct.type].
	  It is ok when we're allowing additional CV qualifiers
	  at the outer level [14.8.2.1]/3,1st bullet.  */
      if ((TYPE_REF_P (arg)
	   || FUNC_OR_METHOD_TYPE_P (arg))
	  && (parm_quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)))
	return 0;

      if ((!INDIRECT_TYPE_P (arg) && TREE_CODE (arg) != TEMPLATE_TYPE_PARM)
	  && (parm_quals & TYPE_QUAL_RESTRICT))
	return 0;
    }

  if (!(strict & (UNIFY_ALLOW_MORE_CV_QUAL | UNIFY_ALLOW_OUTER_MORE_CV_QUAL))
      && (arg_quals & parm_quals) != parm_quals)
    return 0;

  if (!(strict & (UNIFY_ALLOW_LESS_CV_QUAL | UNIFY_ALLOW_OUTER_LESS_CV_QUAL))
      && (parm_quals & arg_quals) != arg_quals)
    return 0;

  return 1;
}

/* Determines the LEVEL and INDEX for the template parameter PARM.
   Works for type parameters, template template parameters (bound or
   not), and non-type parameters (TEMPLATE_PARM_INDEX).  */
void
template_parm_level_and_index (tree parm, int* level, int* index)
{
  if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
      || TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM
      || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM)
    {
      *index = TEMPLATE_TYPE_IDX (parm);
      *level = TEMPLATE_TYPE_LEVEL (parm);
    }
  else
    {
      *index = TEMPLATE_PARM_IDX (parm);
      *level = TEMPLATE_PARM_LEVEL (parm);
    }
}

/* Recurse into unify and propagate failure (nonzero) immediately to
   the enclosing function.  */
#define RECUR_AND_CHECK_FAILURE(TP, TA, P, A, S, EP)			\
  do {									\
    if (unify (TP, TA, P, A, S, EP))					\
      return 1;								\
  } while (0)

/* Unifies the remaining arguments in PACKED_ARGS with the pack
   expansion at the end of PACKED_PARMS.  Returns 0 if the type
   deduction succeeds, 1 otherwise.  STRICT is the same as in
   fn_type_unification.  CALL_ARGS_P is true iff PACKED_ARGS is actually
   a function call argument list.
   We'll need to adjust the arguments to make them types.  SUBR tells
   us if this is from a recursive call to type_unification_real, or for
   comparing two template argument lists.  */

static int
unify_pack_expansion (tree tparms, tree targs, tree packed_parms,
		      tree packed_args, unification_kind_t strict,
		      bool subr, bool explain_p)
{
  tree parm
    = TREE_VEC_ELT (packed_parms, TREE_VEC_LENGTH (packed_parms) - 1);
  tree pattern = PACK_EXPANSION_PATTERN (parm);
  tree pack, packs = NULL_TREE;
  /* START is the index of the pack expansion; everything before it has
     already been unified by the caller.  */
  int i, start = TREE_VEC_LENGTH (packed_parms) - 1;

  /* Add in any args remembered from an earlier partial instantiation.  */
  targs = add_to_template_args (PACK_EXPANSION_EXTRA_ARGS (parm), targs);
  int levels = TMPL_ARGS_DEPTH (targs);

  packed_args = expand_template_argument_pack (packed_args);

  int len = TREE_VEC_LENGTH (packed_args);

  /* Determine the parameter packs we will be deducing from the
     pattern, and record their current deductions.  */
  for (pack = PACK_EXPANSION_PARAMETER_PACKS (parm);
       pack; pack = TREE_CHAIN (pack))
    {
      tree parm_pack = TREE_VALUE (pack);
      int idx, level;

      /* Only template parameter packs can be deduced, not e.g. function
	 parameter packs or __bases or __integer_pack.  */
      if (!TEMPLATE_PARM_P (parm_pack))
	continue;

      /* Determine the index and level of this parameter pack.  */
      template_parm_level_and_index (parm_pack, &level, &idx);
      if (level < levels)
	continue;

      /* Keep track of the parameter packs and their corresponding
	 argument packs.  PACKS is a TREE_LIST: TREE_PURPOSE is the
	 parameter pack, TREE_VALUE its prior deduction, and TREE_TYPE
	 (repurposed here) a scratch vector of per-element deductions.  */
      packs = tree_cons (parm_pack, TMPL_ARG (targs, level, idx), packs);
      TREE_TYPE (packs) = make_tree_vec (len - start);
    }

  /* Loop through all of the arguments that have not yet been
     unified and unify each with the pattern.  */
  for (i = start; i < len; i++)
    {
      tree parm;
      bool any_explicit = false;
      tree arg = TREE_VEC_ELT (packed_args, i);

      /* For each parameter pack, set its TMPL_ARG to either NULL_TREE
	 or the element of its argument pack at the current index if
	 this argument was explicitly specified.  */
      for (pack = packs; pack; pack = TREE_CHAIN (pack))
	{
	  int idx, level;
	  tree arg, pargs;
	  template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);

	  arg = NULL_TREE;
	  if (TREE_VALUE (pack)
	      && (pargs = ARGUMENT_PACK_EXPLICIT_ARGS (TREE_VALUE (pack)))
	      && (i - start < TREE_VEC_LENGTH (pargs)))
	    {
	      any_explicit = true;
	      arg = TREE_VEC_ELT (pargs, i - start);
	    }
	  TMPL_ARG (targs, level, idx) = arg;
	}

      /* If we had explicit template arguments, substitute them into the
	 pattern before deduction.  */
      if (any_explicit)
	{
	  /* Some arguments might still be unspecified or dependent.  */
	  bool dependent;
	  ++processing_template_decl;
	  dependent = any_dependent_template_arguments_p (targs);
	  if (!dependent)
	    --processing_template_decl;
	  parm = tsubst (pattern, targs,
			 explain_p ? tf_warning_or_error : tf_none,
			 NULL_TREE);
	  if (dependent)
	    --processing_template_decl;
	  if (parm == error_mark_node)
	    return 1;
	}
      else
	parm = pattern;

      /* Unify the pattern with the current argument.  */
      if (unify_one_argument (tparms, targs, parm, arg, subr, strict,
			      explain_p))
	return 1;

      /* For each parameter pack, collect the deduced value.  */
      for (pack = packs; pack; pack = TREE_CHAIN (pack))
	{
	  int idx, level;
	  template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);

	  TREE_VEC_ELT (TREE_TYPE (pack), i - start)
	    = TMPL_ARG (targs, level, idx);
	}
    }

  /* Verify that the results of unification with the parameter packs
     produce results consistent with what we've seen before, and make
     the deduced argument packs available.  */
  for (pack = packs; pack; pack = TREE_CHAIN (pack))
    {
      tree old_pack = TREE_VALUE (pack);
      tree new_args = TREE_TYPE (pack);
      int i, len = TREE_VEC_LENGTH (new_args);
      int idx, level;
      bool nondeduced_p = false;

      /* By default keep the original deduced argument pack.
	 If necessary, more specific code is going to update the
	 resulting deduced argument later down in this function.  */
      template_parm_level_and_index (TREE_PURPOSE (pack), &level, &idx);
      TMPL_ARG (targs, level, idx) = old_pack;

      /* If NEW_ARGS contains any NULL_TREE entries, we didn't
	 actually deduce anything.  */
      for (i = 0; i < len && !nondeduced_p; ++i)
	if (TREE_VEC_ELT (new_args, i) == NULL_TREE)
	  nondeduced_p = true;
      if (nondeduced_p)
	continue;

      if (old_pack && ARGUMENT_PACK_INCOMPLETE_P (old_pack))
	{
	  /* If we had fewer function args than explicit template args,
	     just use the explicits.  */
	  tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack);
	  int explicit_len = TREE_VEC_LENGTH (explicit_args);
	  if (len < explicit_len)
	    new_args = explicit_args;
	}

      if (!old_pack)
	{
	  tree result;
	  /* Build the deduced *_ARGUMENT_PACK.  */
	  if (TREE_CODE (TREE_PURPOSE (pack)) == TEMPLATE_PARM_INDEX)
	    {
	      result = make_node (NONTYPE_ARGUMENT_PACK);
	      TREE_CONSTANT (result) = 1;
	    }
	  else
	    result = cxx_make_type (TYPE_ARGUMENT_PACK);

	  SET_ARGUMENT_PACK_ARGS (result, new_args);

	  /* Note the deduced argument packs for this parameter
	     pack.  */
	  TMPL_ARG (targs, level, idx) = result;
	}
      else if (ARGUMENT_PACK_INCOMPLETE_P (old_pack)
	       && (ARGUMENT_PACK_ARGS (old_pack)
		   == ARGUMENT_PACK_EXPLICIT_ARGS (old_pack)))
	{
	  /* We only had the explicitly-provided arguments before, but
	     now we have a complete set of arguments.  */
	  tree explicit_args = ARGUMENT_PACK_EXPLICIT_ARGS (old_pack);

	  SET_ARGUMENT_PACK_ARGS (old_pack, new_args);
	  ARGUMENT_PACK_INCOMPLETE_P (old_pack) = 1;
	  ARGUMENT_PACK_EXPLICIT_ARGS (old_pack) = explicit_args;
	}
      else
	{
	  tree bad_old_arg = NULL_TREE, bad_new_arg = NULL_TREE;
	  tree old_args = ARGUMENT_PACK_ARGS (old_pack);
	  /* Restore TREE_VEC_LENGTH (old_args) on scope exit if the
	     aggregate-guide special case below shrinks it.  */
	  temp_override<int> ovl (TREE_VEC_LENGTH (old_args));

	  /* During template argument deduction for the aggregate deduction
	     candidate, the number of elements in a trailing parameter pack
	     is only deduced from the number of remaining function
	     arguments if it is not otherwise deduced.  */
	  if (cxx_dialect >= cxx20
	      && TREE_VEC_LENGTH (new_args) < TREE_VEC_LENGTH (old_args)
	      && builtin_guide_p (TPARMS_PRIMARY_TEMPLATE (tparms)))
	    TREE_VEC_LENGTH (old_args) = TREE_VEC_LENGTH (new_args);

	  if (!comp_template_args (old_args, new_args,
				   &bad_old_arg, &bad_new_arg))
	    /* Inconsistent unification of this parameter pack.  */
	    return unify_parameter_pack_inconsistent (explain_p,
						      bad_old_arg,
						      bad_new_arg);
	}
    }

  return unify_success (explain_p);
}

/* Handle unification of the domain of an array.  PARM_DOM and ARG_DOM
   are INTEGER_TYPEs representing the TYPE_DOMAIN of ARRAY_TYPEs.  The
   other parameters and return value are as for unify.  */

static int
unify_array_domain (tree tparms, tree targs,
		    tree parm_dom, tree arg_dom,
		    bool explain_p)
{
  tree parm_max;
  tree arg_max;
  bool parm_cst;
  bool arg_cst;

  /* Our representation of array types uses "N - 1" as the
     TYPE_MAX_VALUE for an array with "N" elements, if "N" is not an
     integer constant.  We cannot unify arbitrarily complex
     expressions, so we eliminate the MINUS_EXPRs here.  */
  parm_max = TYPE_MAX_VALUE (parm_dom);
  parm_cst = TREE_CODE (parm_max) == INTEGER_CST;
  if (!parm_cst)
    {
      gcc_assert (TREE_CODE (parm_max) == MINUS_EXPR);
      parm_max = TREE_OPERAND (parm_max, 0);
    }
  arg_max = TYPE_MAX_VALUE (arg_dom);
  arg_cst = TREE_CODE (arg_max) == INTEGER_CST;
  if (!arg_cst)
    {
      /* The ARG_MAX may not be a simple MINUS_EXPR, if we are trying
	 to unify the type of a variable with the type of a template
	 parameter.  For example:

	   template <unsigned int N>
	   void f (char (&) [N]);
	   int g();
	   void h(int i) {
	     char a[g(i)];
	     f(a);
	   }

	 Here, the type of the ARG will be "int [g(i)]", and may be a
	 SAVE_EXPR, etc.  */
      if (TREE_CODE (arg_max) != MINUS_EXPR)
	return unify_vla_arg (explain_p, arg_dom);
      arg_max = TREE_OPERAND (arg_max, 0);
    }

  /* If only one of the bounds used a MINUS_EXPR, compensate by adding
     one to the other bound.  */
  if (parm_cst && !arg_cst)
    parm_max = fold_build2_loc (input_location, PLUS_EXPR,
				integer_type_node,
				parm_max,
				integer_one_node);
  else if (arg_cst && !parm_cst)
    arg_max = fold_build2_loc (input_location, PLUS_EXPR,
			       integer_type_node,
			       arg_max,
			       integer_one_node);

  return unify (tparms, targs, parm_max, arg_max, UNIFY_ALLOW_INTEGER,
		explain_p);
}

/* Returns whether T, a P or A in unify, is a type, template or expression.  */

enum pa_kind_t { pa_type, pa_tmpl, pa_expr };

static pa_kind_t
pa_kind (tree t)
{
  /* Classify by the expansion pattern, not the expansion itself.  */
  if (PACK_EXPANSION_P (t))
    t = PACK_EXPANSION_PATTERN (t);
  if (TREE_CODE (t) == TEMPLATE_TEMPLATE_PARM
      || TREE_CODE (t) == UNBOUND_CLASS_TEMPLATE
      || DECL_TYPE_TEMPLATE_P (t))
    return pa_tmpl;
  else if (TYPE_P (t))
    return pa_type;
  else
    return pa_expr;
}

/* Deduce the value of template parameters.  TPARMS is the (innermost)
   set of template parameters to a template.  TARGS is the bindings
   for those template parameters, as determined thus far; TARGS may
   include template arguments for outer levels of template parameters
   as well.  PARM is a parameter to a template function, or a
   subcomponent of that parameter; ARG is the corresponding argument.
   This function attempts to match PARM with ARG in a manner
   consistent with the existing assignments in TARGS.  If more values
   are deduced, then TARGS is updated.

   Returns 0 if the type deduction succeeds, 1 otherwise.  The
   parameter STRICT is a bitwise or of the following flags:

     UNIFY_ALLOW_NONE:
       Require an exact match between PARM and ARG.
     UNIFY_ALLOW_MORE_CV_QUAL:
       Allow the deduced ARG to be more cv-qualified (by qualification
       conversion) than ARG.
     UNIFY_ALLOW_LESS_CV_QUAL:
       Allow the deduced ARG to be less cv-qualified than ARG.
     UNIFY_ALLOW_DERIVED:
       Allow the deduced ARG to be a template base class of ARG,
       or a pointer to a template base class of the type pointed to by
       ARG.
     UNIFY_ALLOW_INTEGER:
       Allow any integral type to be deduced.  See the
       TEMPLATE_PARM_INDEX case for more information.
UNIFY_ALLOW_OUTER_LEVEL: This is the outermost level of a deduction. Used to determine validity of qualification conversions. A valid qualification conversion must have const qualified pointers leading up to the inner type which requires additional CV quals, except at the outer level, where const is not required [conv.qual]. It would be normal to set this flag in addition to setting UNIFY_ALLOW_MORE_CV_QUAL. UNIFY_ALLOW_OUTER_MORE_CV_QUAL: This is the outermost level of a deduction, and PARM can be more CV qualified at this point. UNIFY_ALLOW_OUTER_LESS_CV_QUAL: This is the outermost level of a deduction, and PARM can be less CV qualified at this point. */ static int unify (tree tparms, tree targs, tree parm, tree arg, int strict, bool explain_p) { int idx; tree targ; tree tparm; int strict_in = strict; tsubst_flags_t complain = (explain_p ? tf_warning_or_error : tf_none); /* I don't think this will do the right thing with respect to types. But the only case I've seen it in so far has been array bounds, where signedness is the only information lost, and I think that will be okay. VIEW_CONVERT_EXPR can appear with class NTTP, thanks to finish_id_expression_1, and are also OK. */ while (CONVERT_EXPR_P (parm) || TREE_CODE (parm) == VIEW_CONVERT_EXPR) parm = TREE_OPERAND (parm, 0); if (arg == error_mark_node) return unify_invalid (explain_p); if (arg == unknown_type_node || arg == init_list_type_node) /* We can't deduce anything from this, but we might get all the template args from other function args. */ return unify_success (explain_p); if (parm == any_targ_node || arg == any_targ_node) return unify_success (explain_p); /* If PARM uses template parameters, then we can't bail out here, even if ARG == PARM, since we won't record unifications for the template parameters. We might need them if we're trying to figure out which of two things is more specialized. 
*/ if (arg == parm && !uses_template_parms (parm)) return unify_success (explain_p); /* Handle init lists early, so the rest of the function can assume we're dealing with a type. */ if (BRACE_ENCLOSED_INITIALIZER_P (arg)) { tree elt, elttype; unsigned i; tree orig_parm = parm; if (!is_std_init_list (parm) && TREE_CODE (parm) != ARRAY_TYPE) /* We can only deduce from an initializer list argument if the parameter is std::initializer_list or an array; otherwise this is a non-deduced context. */ return unify_success (explain_p); if (TREE_CODE (parm) == ARRAY_TYPE) elttype = TREE_TYPE (parm); else { elttype = TREE_VEC_ELT (CLASSTYPE_TI_ARGS (parm), 0); /* Deduction is defined in terms of a single type, so just punt on the (bizarre) std::initializer_list<T...>. */ if (PACK_EXPANSION_P (elttype)) return unify_success (explain_p); } if (strict != DEDUCE_EXACT && TYPE_P (elttype) && !uses_deducible_template_parms (elttype)) /* If ELTTYPE has no deducible template parms, skip deduction from the list elements. */; else FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (arg), i, elt) { int elt_strict = strict; if (elt == error_mark_node) return unify_invalid (explain_p); if (!BRACE_ENCLOSED_INITIALIZER_P (elt)) { tree type = TREE_TYPE (elt); if (type == error_mark_node) return unify_invalid (explain_p); /* It should only be possible to get here for a call. */ gcc_assert (elt_strict & UNIFY_ALLOW_OUTER_LEVEL); elt_strict |= maybe_adjust_types_for_deduction (DEDUCE_CALL, &elttype, &type, elt); elt = type; } RECUR_AND_CHECK_FAILURE (tparms, targs, elttype, elt, elt_strict, explain_p); } if (TREE_CODE (parm) == ARRAY_TYPE && deducible_array_bound (TYPE_DOMAIN (parm))) { /* Also deduce from the length of the initializer list. 
*/ tree max = size_int (CONSTRUCTOR_NELTS (arg)); tree idx = compute_array_index_type (NULL_TREE, max, tf_none); if (idx == error_mark_node) return unify_invalid (explain_p); return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm), idx, explain_p); } /* If the std::initializer_list<T> deduction worked, replace the deduced A with std::initializer_list<A>. */ if (orig_parm != parm) { idx = TEMPLATE_TYPE_IDX (orig_parm); targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx); targ = listify (targ); TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = targ; } return unify_success (explain_p); } /* If parm and arg aren't the same kind of thing (template, type, or expression), fail early. */ if (pa_kind (parm) != pa_kind (arg)) return unify_invalid (explain_p); /* Immediately reject some pairs that won't unify because of cv-qualification mismatches. */ if (TREE_CODE (arg) == TREE_CODE (parm) && TYPE_P (arg) /* It is the elements of the array which hold the cv quals of an array type, and the elements might be template type parms. We'll check when we recurse. */ && TREE_CODE (arg) != ARRAY_TYPE /* We check the cv-qualifiers when unifying with template type parameters below. We want to allow ARG `const T' to unify with PARM `T' for example, when computing which of two templates is more specialized, for example. 
*/ && TREE_CODE (arg) != TEMPLATE_TYPE_PARM && !check_cv_quals_for_unify (strict_in, arg, parm)) return unify_cv_qual_mismatch (explain_p, parm, arg); if (!(strict & UNIFY_ALLOW_OUTER_LEVEL) && TYPE_P (parm) && !CP_TYPE_CONST_P (parm)) strict &= ~UNIFY_ALLOW_MORE_CV_QUAL; strict &= ~UNIFY_ALLOW_OUTER_LEVEL; strict &= ~UNIFY_ALLOW_DERIVED; strict &= ~UNIFY_ALLOW_OUTER_MORE_CV_QUAL; strict &= ~UNIFY_ALLOW_OUTER_LESS_CV_QUAL; switch (TREE_CODE (parm)) { case TYPENAME_TYPE: case SCOPE_REF: case UNBOUND_CLASS_TEMPLATE: /* In a type which contains a nested-name-specifier, template argument values cannot be deduced for template parameters used within the nested-name-specifier. */ return unify_success (explain_p); case TEMPLATE_TYPE_PARM: case TEMPLATE_TEMPLATE_PARM: case BOUND_TEMPLATE_TEMPLATE_PARM: tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0)); if (error_operand_p (tparm)) return unify_invalid (explain_p); if (TEMPLATE_TYPE_LEVEL (parm) != template_decl_level (tparm)) /* The PARM is not one we're trying to unify. Just check to see if it matches ARG. */ { if (TREE_CODE (arg) == TREE_CODE (parm) && (is_auto (parm) ? is_auto (arg) : same_type_p (parm, arg))) return unify_success (explain_p); else return unify_type_mismatch (explain_p, parm, arg); } idx = TEMPLATE_TYPE_IDX (parm); targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx); tparm = TREE_VALUE (TREE_VEC_ELT (tparms, idx)); if (error_operand_p (tparm)) return unify_invalid (explain_p); /* Check for mixed types and values. */ if ((TREE_CODE (parm) == TEMPLATE_TYPE_PARM && TREE_CODE (tparm) != TYPE_DECL) || (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM && TREE_CODE (tparm) != TEMPLATE_DECL)) gcc_unreachable (); if (TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM) { if ((strict_in & UNIFY_ALLOW_DERIVED) && CLASS_TYPE_P (arg)) { /* First try to match ARG directly. */ tree t = try_class_unification (tparms, targs, parm, arg, explain_p); if (!t) { /* Otherwise, look for a suitable base of ARG, as below. 
*/ enum template_base_result r; r = get_template_base (tparms, targs, parm, arg, explain_p, &t); if (!t) return unify_no_common_base (explain_p, r, parm, arg); arg = t; } } /* ARG must be constructed from a template class or a template template parameter. */ else if (TREE_CODE (arg) != BOUND_TEMPLATE_TEMPLATE_PARM && !CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P (arg)) return unify_template_deduction_failure (explain_p, parm, arg); /* Deduce arguments T, i from TT<T> or TT<i>. */ if (unify_bound_ttp_args (tparms, targs, parm, arg, explain_p)) return 1; arg = TYPE_TI_TEMPLATE (arg); /* Fall through to deduce template name. */ } if (TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM) { /* Deduce template name TT from TT, TT<>, TT<T> and TT<i>. */ /* Simple cases: Value already set, does match or doesn't. */ if (targ != NULL_TREE && template_args_equal (targ, arg)) return unify_success (explain_p); else if (targ) return unify_inconsistency (explain_p, parm, targ, arg); } else { /* If PARM is `const T' and ARG is only `int', we don't have a match unless we are allowing additional qualification. If ARG is `const int' and PARM is just `T' that's OK; that binds `const int' to `T'. */ if (!check_cv_quals_for_unify (strict_in | UNIFY_ALLOW_LESS_CV_QUAL, arg, parm)) return unify_cv_qual_mismatch (explain_p, parm, arg); /* Consider the case where ARG is `const volatile int' and PARM is `const T'. Then, T should be `volatile int'. */ arg = cp_build_qualified_type_real (arg, cp_type_quals (arg) & ~cp_type_quals (parm), tf_none); if (arg == error_mark_node) return unify_invalid (explain_p); /* Simple cases: Value already set, does match or doesn't. */ if (targ != NULL_TREE && same_type_p (targ, arg)) return unify_success (explain_p); else if (targ) return unify_inconsistency (explain_p, parm, targ, arg); /* Make sure that ARG is not a variable-sized array. 
(Note that were talking about variable-sized arrays (like `int[n]'), rather than arrays of unknown size (like `int[]').) We'll get very confused by such a type since the bound of the array is not constant, and therefore not mangleable. Besides, such types are not allowed in ISO C++, so we can do as we please here. We do allow them for 'auto' deduction, since that isn't ABI-exposed. */ if (!is_auto (parm) && variably_modified_type_p (arg, NULL_TREE)) return unify_vla_arg (explain_p, arg); /* Strip typedefs as in convert_template_argument. */ arg = canonicalize_type_argument (arg, tf_none); } /* If ARG is a parameter pack or an expansion, we cannot unify against it unless PARM is also a parameter pack. */ if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg)) && !template_parameter_pack_p (parm)) return unify_parameter_pack_mismatch (explain_p, parm, arg); /* If the argument deduction results is a METHOD_TYPE, then there is a problem. METHOD_TYPE doesn't map to any real C++ type the result of the deduction cannot be of that type. */ if (TREE_CODE (arg) == METHOD_TYPE) return unify_method_type_error (explain_p, arg); TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg; return unify_success (explain_p); case TEMPLATE_PARM_INDEX: tparm = TREE_VALUE (TREE_VEC_ELT (tparms, 0)); if (error_operand_p (tparm)) return unify_invalid (explain_p); if (TEMPLATE_PARM_LEVEL (parm) != template_decl_level (tparm)) { /* The PARM is not one we're trying to unify. Just check to see if it matches ARG. */ int result = !(TREE_CODE (arg) == TREE_CODE (parm) && cp_tree_equal (parm, arg)); if (result) unify_expression_unequal (explain_p, parm, arg); return result; } idx = TEMPLATE_PARM_IDX (parm); targ = TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx); if (targ) { if ((strict & UNIFY_ALLOW_INTEGER) && TREE_TYPE (targ) && TREE_TYPE (arg) && CP_INTEGRAL_TYPE_P (TREE_TYPE (targ))) /* We're deducing from an array bound, the type doesn't matter. 
*/ arg = fold_convert (TREE_TYPE (targ), arg); int x = !cp_tree_equal (targ, arg); if (x) unify_inconsistency (explain_p, parm, targ, arg); return x; } /* [temp.deduct.type] If, in the declaration of a function template with a non-type template-parameter, the non-type template-parameter is used in an expression in the function parameter-list and, if the corresponding template-argument is deduced, the template-argument type shall match the type of the template-parameter exactly, except that a template-argument deduced from an array bound may be of any integral type. The non-type parameter might use already deduced type parameters. */ tparm = TREE_TYPE (parm); if (TEMPLATE_PARM_LEVEL (parm) > TMPL_ARGS_DEPTH (targs)) /* We don't have enough levels of args to do any substitution. This can happen in the context of -fnew-ttp-matching. */; else { ++processing_template_decl; tparm = tsubst (tparm, targs, tf_none, NULL_TREE); --processing_template_decl; if (tree a = type_uses_auto (tparm)) { tparm = do_auto_deduction (tparm, arg, a, complain, adc_unify); if (tparm == error_mark_node) return 1; } } if (!TREE_TYPE (arg)) /* Template-parameter dependent expression. Just accept it for now. It will later be processed in convert_template_argument. */ ; else if (same_type_ignoring_top_level_qualifiers_p (non_reference (TREE_TYPE (arg)), non_reference (tparm))) /* OK. Ignore top-level quals here because a class-type template parameter object is const. */; else if ((strict & UNIFY_ALLOW_INTEGER) && CP_INTEGRAL_TYPE_P (tparm)) /* Convert the ARG to the type of PARM; the deduced non-type template argument must exactly match the types of the corresponding parameter. */ arg = fold (build_nop (tparm, arg)); else if (uses_template_parms (tparm)) { /* We haven't deduced the type of this parameter yet. */ if (cxx_dialect >= cxx17 /* We deduce from array bounds in try_array_deduction. */ && !(strict & UNIFY_ALLOW_INTEGER)) { /* Deduce it from the non-type argument. 
*/ tree atype = TREE_TYPE (arg); RECUR_AND_CHECK_FAILURE (tparms, targs, tparm, atype, UNIFY_ALLOW_NONE, explain_p); } else /* Try again later. */ return unify_success (explain_p); } else return unify_type_mismatch (explain_p, tparm, TREE_TYPE (arg)); /* If ARG is a parameter pack or an expansion, we cannot unify against it unless PARM is also a parameter pack. */ if ((template_parameter_pack_p (arg) || PACK_EXPANSION_P (arg)) && !TEMPLATE_PARM_PARAMETER_PACK (parm)) return unify_parameter_pack_mismatch (explain_p, parm, arg); { bool removed_attr = false; arg = strip_typedefs_expr (arg, &removed_attr); } TREE_VEC_ELT (INNERMOST_TEMPLATE_ARGS (targs), idx) = arg; return unify_success (explain_p); case PTRMEM_CST: { /* A pointer-to-member constant can be unified only with another constant. */ if (TREE_CODE (arg) != PTRMEM_CST) return unify_ptrmem_cst_mismatch (explain_p, parm, arg); /* Just unify the class member. It would be useless (and possibly wrong, depending on the strict flags) to unify also PTRMEM_CST_CLASS, because we want to be sure that both parm and arg refer to the same variable, even if through different classes. For instance: struct A { int x; }; struct B : A { }; Unification of &A::x and &B::x must succeed. */ return unify (tparms, targs, PTRMEM_CST_MEMBER (parm), PTRMEM_CST_MEMBER (arg), strict, explain_p); } case POINTER_TYPE: { if (!TYPE_PTR_P (arg)) return unify_type_mismatch (explain_p, parm, arg); /* [temp.deduct.call] A can be another pointer or pointer to member type that can be converted to the deduced A via a qualification conversion (_conv.qual_). We pass down STRICT here rather than UNIFY_ALLOW_NONE. This will allow for additional cv-qualification of the pointed-to types if appropriate. */ if (TREE_CODE (TREE_TYPE (arg)) == RECORD_TYPE) /* The derived-to-base conversion only persists through one level of pointers. 
*/ strict |= (strict_in & UNIFY_ALLOW_DERIVED); return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), strict, explain_p); } case REFERENCE_TYPE: if (!TYPE_REF_P (arg)) return unify_type_mismatch (explain_p, parm, arg); return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p); case ARRAY_TYPE: if (TREE_CODE (arg) != ARRAY_TYPE) return unify_type_mismatch (explain_p, parm, arg); if ((TYPE_DOMAIN (parm) == NULL_TREE) != (TYPE_DOMAIN (arg) == NULL_TREE)) return unify_type_mismatch (explain_p, parm, arg); RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), strict & UNIFY_ALLOW_MORE_CV_QUAL, explain_p); if (TYPE_DOMAIN (parm) != NULL_TREE) return unify_array_domain (tparms, targs, TYPE_DOMAIN (parm), TYPE_DOMAIN (arg), explain_p); return unify_success (explain_p); case REAL_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case INTEGER_TYPE: case BOOLEAN_TYPE: case ENUMERAL_TYPE: case VOID_TYPE: case NULLPTR_TYPE: if (TREE_CODE (arg) != TREE_CODE (parm)) return unify_type_mismatch (explain_p, parm, arg); /* We have already checked cv-qualification at the top of the function. */ if (!same_type_ignoring_top_level_qualifiers_p (arg, parm)) return unify_type_mismatch (explain_p, parm, arg); /* As far as unification is concerned, this wins. Later checks will invalidate it if necessary. */ return unify_success (explain_p); /* Types INTEGER_CST and MINUS_EXPR can come from array bounds. */ /* Type INTEGER_CST can come from ordinary constant template args. */ case INTEGER_CST: while (CONVERT_EXPR_P (arg)) arg = TREE_OPERAND (arg, 0); if (TREE_CODE (arg) != INTEGER_CST) return unify_template_argument_mismatch (explain_p, parm, arg); return (tree_int_cst_equal (parm, arg) ? 
unify_success (explain_p) : unify_template_argument_mismatch (explain_p, parm, arg)); case TREE_VEC: { int i, len, argslen; int parm_variadic_p = 0; if (TREE_CODE (arg) != TREE_VEC) return unify_template_argument_mismatch (explain_p, parm, arg); len = TREE_VEC_LENGTH (parm); argslen = TREE_VEC_LENGTH (arg); /* Check for pack expansions in the parameters. */ for (i = 0; i < len; ++i) { if (PACK_EXPANSION_P (TREE_VEC_ELT (parm, i))) { if (i == len - 1) /* We can unify against something with a trailing parameter pack. */ parm_variadic_p = 1; else /* [temp.deduct.type]/9: If the template argument list of P contains a pack expansion that is not the last template argument, the entire template argument list is a non-deduced context. */ return unify_success (explain_p); } } /* If we don't have enough arguments to satisfy the parameters (not counting the pack expression at the end), or we have too many arguments for a parameter list that doesn't end in a pack expression, we can't unify. */ if (parm_variadic_p ? argslen < len - parm_variadic_p : argslen != len) return unify_arity (explain_p, TREE_VEC_LENGTH (arg), len); /* Unify all of the parameters that precede the (optional) pack expression. 
*/ for (i = 0; i < len - parm_variadic_p; ++i) { RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_VEC_ELT (parm, i), TREE_VEC_ELT (arg, i), UNIFY_ALLOW_NONE, explain_p); } if (parm_variadic_p) return unify_pack_expansion (tparms, targs, parm, arg, DEDUCE_EXACT, /*subr=*/true, explain_p); return unify_success (explain_p); } case RECORD_TYPE: case UNION_TYPE: if (TREE_CODE (arg) != TREE_CODE (parm)) return unify_type_mismatch (explain_p, parm, arg); if (TYPE_PTRMEMFUNC_P (parm)) { if (!TYPE_PTRMEMFUNC_P (arg)) return unify_type_mismatch (explain_p, parm, arg); return unify (tparms, targs, TYPE_PTRMEMFUNC_FN_TYPE (parm), TYPE_PTRMEMFUNC_FN_TYPE (arg), strict, explain_p); } else if (TYPE_PTRMEMFUNC_P (arg)) return unify_type_mismatch (explain_p, parm, arg); if (CLASSTYPE_TEMPLATE_INFO (parm)) { tree t = NULL_TREE; if (strict_in & UNIFY_ALLOW_DERIVED) { /* First, we try to unify the PARM and ARG directly. */ t = try_class_unification (tparms, targs, parm, arg, explain_p); if (!t) { /* Fallback to the special case allowed in [temp.deduct.call]: If P is a class, and P has the form template-id, then A can be a derived class of the deduced A. Likewise, if P is a pointer to a class of the form template-id, A can be a pointer to a derived class pointed to by the deduced A. */ enum template_base_result r; r = get_template_base (tparms, targs, parm, arg, explain_p, &t); if (!t) { /* Don't give the derived diagnostic if we're already dealing with the same template. */ bool same_template = (CLASSTYPE_TEMPLATE_INFO (arg) && (CLASSTYPE_TI_TEMPLATE (parm) == CLASSTYPE_TI_TEMPLATE (arg))); return unify_no_common_base (explain_p && !same_template, r, parm, arg); } } } else if (CLASSTYPE_TEMPLATE_INFO (arg) && (CLASSTYPE_TI_TEMPLATE (parm) == CLASSTYPE_TI_TEMPLATE (arg))) /* Perhaps PARM is something like S<U> and ARG is S<int>. Then, we should unify `int' and `U'. */ t = arg; else /* There's no chance of unification succeeding. 
*/ return unify_type_mismatch (explain_p, parm, arg); return unify (tparms, targs, CLASSTYPE_TI_ARGS (parm), CLASSTYPE_TI_ARGS (t), UNIFY_ALLOW_NONE, explain_p); } else if (!same_type_ignoring_top_level_qualifiers_p (parm, arg)) return unify_type_mismatch (explain_p, parm, arg); return unify_success (explain_p); case METHOD_TYPE: case FUNCTION_TYPE: { unsigned int nargs; tree *args; tree a; unsigned int i; if (TREE_CODE (arg) != TREE_CODE (parm)) return unify_type_mismatch (explain_p, parm, arg); /* CV qualifications for methods can never be deduced, they must match exactly. We need to check them explicitly here, because type_unification_real treats them as any other cv-qualified parameter. */ if (TREE_CODE (parm) == METHOD_TYPE && (!check_cv_quals_for_unify (UNIFY_ALLOW_NONE, class_of_this_parm (arg), class_of_this_parm (parm)))) return unify_cv_qual_mismatch (explain_p, parm, arg); if (TREE_CODE (arg) == FUNCTION_TYPE && type_memfn_quals (parm) != type_memfn_quals (arg)) return unify_cv_qual_mismatch (explain_p, parm, arg); if (type_memfn_rqual (parm) != type_memfn_rqual (arg)) return unify_type_mismatch (explain_p, parm, arg); RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), UNIFY_ALLOW_NONE, explain_p); nargs = list_length (TYPE_ARG_TYPES (arg)); args = XALLOCAVEC (tree, nargs); for (a = TYPE_ARG_TYPES (arg), i = 0; a != NULL_TREE && a != void_list_node; a = TREE_CHAIN (a), ++i) args[i] = TREE_VALUE (a); nargs = i; if (type_unification_real (tparms, targs, TYPE_ARG_TYPES (parm), args, nargs, 1, DEDUCE_EXACT, NULL, explain_p)) return 1; if (flag_noexcept_type) { tree pspec = TYPE_RAISES_EXCEPTIONS (parm); tree aspec = canonical_eh_spec (TYPE_RAISES_EXCEPTIONS (arg)); if (pspec == NULL_TREE) pspec = noexcept_false_spec; if (aspec == NULL_TREE) aspec = noexcept_false_spec; if (TREE_PURPOSE (pspec) && TREE_PURPOSE (aspec) && uses_template_parms (TREE_PURPOSE (pspec))) RECUR_AND_CHECK_FAILURE (tparms, targs, TREE_PURPOSE (pspec), 
TREE_PURPOSE (aspec), UNIFY_ALLOW_NONE, explain_p); else if (nothrow_spec_p (pspec) && !nothrow_spec_p (aspec)) return unify_type_mismatch (explain_p, parm, arg); } return 0; } case OFFSET_TYPE: /* Unify a pointer to member with a pointer to member function, which deduces the type of the member as a function type. */ if (TYPE_PTRMEMFUNC_P (arg)) { /* Check top-level cv qualifiers */ if (!check_cv_quals_for_unify (UNIFY_ALLOW_NONE, arg, parm)) return unify_cv_qual_mismatch (explain_p, parm, arg); RECUR_AND_CHECK_FAILURE (tparms, targs, TYPE_OFFSET_BASETYPE (parm), TYPE_PTRMEMFUNC_OBJECT_TYPE (arg), UNIFY_ALLOW_NONE, explain_p); /* Determine the type of the function we are unifying against. */ tree fntype = static_fn_type (arg); return unify (tparms, targs, TREE_TYPE (parm), fntype, strict, explain_p); } if (TREE_CODE (arg) != OFFSET_TYPE) return unify_type_mismatch (explain_p, parm, arg); RECUR_AND_CHECK_FAILURE (tparms, targs, TYPE_OFFSET_BASETYPE (parm), TYPE_OFFSET_BASETYPE (arg), UNIFY_ALLOW_NONE, explain_p); return unify (tparms, targs, TREE_TYPE (parm), TREE_TYPE (arg), strict, explain_p); case CONST_DECL: if (DECL_TEMPLATE_PARM_P (parm)) return unify (tparms, targs, DECL_INITIAL (parm), arg, strict, explain_p); if (arg != scalar_constant_value (parm)) return unify_template_argument_mismatch (explain_p, parm, arg); return unify_success (explain_p); case FIELD_DECL: case TEMPLATE_DECL: /* Matched cases are handled by the ARG == PARM test above. */ return unify_template_argument_mismatch (explain_p, parm, arg); case VAR_DECL: /* We might get a variable as a non-type template argument in parm if the corresponding parameter is type-dependent. Make any necessary adjustments based on whether arg is a reference. 
*/ if (CONSTANT_CLASS_P (arg)) parm = fold_non_dependent_expr (parm, complain); else if (REFERENCE_REF_P (arg)) { tree sub = TREE_OPERAND (arg, 0); STRIP_NOPS (sub); if (TREE_CODE (sub) == ADDR_EXPR) arg = TREE_OPERAND (sub, 0); } /* Now use the normal expression code to check whether they match. */ goto expr; case TYPE_ARGUMENT_PACK: case NONTYPE_ARGUMENT_PACK: return unify (tparms, targs, ARGUMENT_PACK_ARGS (parm), ARGUMENT_PACK_ARGS (arg), strict, explain_p); case TYPEOF_TYPE: case DECLTYPE_TYPE: case UNDERLYING_TYPE: /* Cannot deduce anything from TYPEOF_TYPE, DECLTYPE_TYPE, or UNDERLYING_TYPE nodes. */ return unify_success (explain_p); case ERROR_MARK: /* Unification fails if we hit an error node. */ return unify_invalid (explain_p); case INDIRECT_REF: if (REFERENCE_REF_P (parm)) { bool pexp = PACK_EXPANSION_P (arg); if (pexp) arg = PACK_EXPANSION_PATTERN (arg); if (REFERENCE_REF_P (arg)) arg = TREE_OPERAND (arg, 0); if (pexp) arg = make_pack_expansion (arg, complain); return unify (tparms, targs, TREE_OPERAND (parm, 0), arg, strict, explain_p); } /* FALLTHRU */ default: /* An unresolved overload is a nondeduced context. */ if (is_overloaded_fn (parm) || type_unknown_p (parm)) return unify_success (explain_p); gcc_assert (EXPR_P (parm) || COMPOUND_LITERAL_P (parm) || TREE_CODE (parm) == TRAIT_EXPR); expr: /* We must be looking at an expression. This can happen with something like: template <int I> void foo(S<I>, S<I + 2>); or template<typename T> void foo(A<T, T{}>); This is a "non-deduced context": [deduct.type] The non-deduced contexts are: --A non-type template argument or an array bound in which a subexpression references a template parameter. In these cases, we assume deduction succeeded, but don't actually infer any unifications. 
*/
      if (!uses_template_parms (parm) && !template_args_equal (parm, arg))
	return unify_expression_unequal (explain_p, parm, arg);
      else
	return unify_success (explain_p);
    }
}
#undef RECUR_AND_CHECK_FAILURE

/* Note that DECL can be defined in this translation unit, if
   required.  */

static void
mark_definable (tree decl)
{
  tree clone;
  DECL_NOT_REALLY_EXTERN (decl) = 1;
  /* Propagate the flag to every clone of DECL as well (e.g. the
     in-charge/not-in-charge constructor and destructor variants).  */
  FOR_EACH_CLONE (clone, decl)
    DECL_NOT_REALLY_EXTERN (clone) = 1;
}

/* Called if RESULT is explicitly instantiated, or is a member of an
   explicitly instantiated class.  EXTERN_P nonzero appears to mean an
   `extern' explicit instantiation declaration (no definition emitted
   here), zero an explicit instantiation definition -- NOTE(review):
   inferred from the branches below; confirm against callers.  */

void
mark_decl_instantiated (tree result, int extern_p)
{
  SET_DECL_EXPLICIT_INSTANTIATION (result);

  /* If this entity has already been written out, it's too late to
     make any modifications.  */
  if (TREE_ASM_WRITTEN (result))
    return;

  /* For anonymous namespace we don't need to do anything.  */
  if (decl_anon_ns_mem_p (result))
    {
      gcc_assert (!TREE_PUBLIC (result));
      return;
    }

  if (TREE_CODE (result) != FUNCTION_DECL)
    /* The TREE_PUBLIC flag for function declarations will have been
       set correctly by tsubst.  */
    TREE_PUBLIC (result) = 1;

  /* This might have been set by an earlier implicit instantiation.  */
  DECL_COMDAT (result) = 0;

  if (extern_p)
    DECL_NOT_REALLY_EXTERN (result) = 0;
  else
    {
      mark_definable (result);
      mark_needed (result);
      /* Always make artificials weak.  */
      if (DECL_ARTIFICIAL (result) && flag_weak)
	comdat_linkage (result);
      /* For WIN32 we also want to put explicit instantiations in
	 linkonce sections.  */
      else if (TREE_PUBLIC (result))
	maybe_make_one_only (result);
      if (TREE_CODE (result) == FUNCTION_DECL
	  && DECL_TEMPLATE_INSTANTIATED (result))
	/* If the function has already been instantiated, clear
	   DECL_EXTERNAL, since start_preparsed_function wouldn't have
	   if we had an earlier extern explicit instantiation.  */
	DECL_EXTERNAL (result) = 0;
    }

  /* If EXTERN_P, then this function will not be emitted -- unless
     followed by an explicit instantiation, at which point its linkage
     will be adjusted.  If !EXTERN_P, then this function will be
     emitted here.  In neither circumstance do we want
     import_export_decl to adjust the linkage.  */
  DECL_INTERFACE_KNOWN (result) = 1;
}

/* Subroutine of more_specialized_fn: check whether TARGS is missing any
   important template arguments.  If any are missing, we check whether
   they're important by using error_mark_node for substituting into any
   args that were used for partial ordering (the ones between ARGS and
   END) and seeing if it bubbles up.  Returns true if an undeduced
   argument actually mattered.  NOTE: this mutates TARGS, replacing
   undeduced (NULL_TREE) slots with error_mark_node.  */

static bool
check_undeduced_parms (tree targs, tree args, tree end)
{
  bool found = false;
  int i;
  /* Poison every undeduced slot so a substitution failure below can be
     traced back to the missing argument.  */
  for (i = TREE_VEC_LENGTH (targs) - 1; i >= 0; --i)
    if (TREE_VEC_ELT (targs, i) == NULL_TREE)
      {
	found = true;
	TREE_VEC_ELT (targs, i) = error_mark_node;
      }
  if (found)
    {
      tree substed = tsubst_arg_types (args, targs, end, tf_none, NULL_TREE);
      if (substed == error_mark_node)
	return true;
    }
  return false;
}

/* Given two function templates PAT1 and PAT2, return:

   1 if PAT1 is more specialized than PAT2 as described in [temp.func.order].
   -1 if PAT2 is more specialized than PAT1.
   0 if neither is more specialized.

   LEN indicates the number of parameters we should consider
   (defaulted parameters should not be considered).

   The 1998 std underspecified function template partial ordering, and
   DR214 addresses the issue.  We take pairs of arguments, one from
   each of the templates, and deduce them against each other.  One of
   the templates will be more specialized if all the *other*
   template's arguments deduce against its arguments and at least one
   of its arguments *does* *not* deduce against the other template's
   corresponding argument.  Deduction is done as for class templates.
   The arguments used in deduction have reference and top level cv
   qualifiers removed.  Iff both arguments were originally reference
   types *and* deduction succeeds in both directions, an lvalue
   reference wins against an rvalue reference and otherwise the
   template with the more cv-qualified argument wins for that pairing
   (if neither is more cv-qualified, they both are equal).
   Unlike regular deduction, after all the arguments have been
   deduced in this way, we do *not* verify the deduced template
   argument values can be substituted into non-deduced contexts.

   The logic can be a bit confusing here, because we look at deduce1 and
   targs1 to see if pat2 is at least as specialized, and vice versa; if
   we can find template arguments for pat1 to make arg1 look like arg2,
   that means that arg2 is at least as specialized as arg1.  */

int
more_specialized_fn (tree pat1, tree pat2, int len)
{
  tree decl1 = DECL_TEMPLATE_RESULT (pat1);
  tree decl2 = DECL_TEMPLATE_RESULT (pat2);
  tree targs1 = make_tree_vec (DECL_NTPARMS (pat1));
  tree targs2 = make_tree_vec (DECL_NTPARMS (pat2));
  tree tparms1 = DECL_INNERMOST_TEMPLATE_PARMS (pat1);
  tree tparms2 = DECL_INNERMOST_TEMPLATE_PARMS (pat2);
  tree args1 = TYPE_ARG_TYPES (TREE_TYPE (decl1));
  tree args2 = TYPE_ARG_TYPES (TREE_TYPE (decl2));
  tree origs1, origs2;
  /* loseN means "patN is NOT more specialized".  */
  bool lose1 = false;
  bool lose2 = false;

  /* Remove the this parameter from non-static member functions.  If
     one is a non-static member function and the other is not a static
     member function, remove the first parameter from that function
     also.  This situation occurs for operator functions where we
     locate both a member function (with this pointer) and non-member
     operator (with explicit first operand).  */
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl1))
    {
      len--; /* LEN is the number of significant arguments for DECL1 */
      args1 = TREE_CHAIN (args1);
      if (!DECL_STATIC_FUNCTION_P (decl2))
	args2 = TREE_CHAIN (args2);
    }
  else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (decl2))
    {
      args2 = TREE_CHAIN (args2);
      if (!DECL_STATIC_FUNCTION_P (decl1))
	{
	  len--;
	  args1 = TREE_CHAIN (args1);
	}
    }

  /* If only one is a conversion operator, they are unordered.  */
  if (DECL_CONV_FN_P (decl1) != DECL_CONV_FN_P (decl2))
    return 0;

  /* Consider the return type for a conversion function */
  if (DECL_CONV_FN_P (decl1))
    {
      args1 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl1)), args1);
      args2 = tree_cons (NULL_TREE, TREE_TYPE (TREE_TYPE (decl2)), args2);
      len++;
    }

  /* Arguments may be dependent; make the dependency predicates behave
     as if we were inside a template.  Balanced by the decrement after
     the loop below.  */
  processing_template_decl++;

  origs1 = args1;
  origs2 = args2;

  while (len--
	 /* Stop when an ellipsis is seen.  */
	 && args1 != NULL_TREE && args2 != NULL_TREE)
    {
      tree arg1 = TREE_VALUE (args1);
      tree arg2 = TREE_VALUE (args2);
      int deduce1, deduce2;
      /* qualsN: cv-quals of the referred-to type if argN was a
	 reference, otherwise -1 (i.e. "not applicable").  */
      int quals1 = -1;
      int quals2 = -1;
      /* refN encodes the reference kind of argN: 0 = not a reference,
	 1 = lvalue reference, 2 = rvalue reference (see the
	 TYPE_REF_IS_RVALUE + 1 computations below).  */
      int ref1 = 0;
      int ref2 = 0;

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION
	  && TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	{
	  /* When both arguments are pack expansions, we need only
	     unify the patterns themselves.  */
	  arg1 = PACK_EXPANSION_PATTERN (arg1);
	  arg2 = PACK_EXPANSION_PATTERN (arg2);

	  /* This is the last comparison we need to do.  */
	  len = 0;
	}

      /* Strip references, remembering kind and cv-quals for the
	 tie-breaking rules applied after deduction.  */
      if (TYPE_REF_P (arg1))
	{
	  ref1 = TYPE_REF_IS_RVALUE (arg1) + 1;
	  arg1 = TREE_TYPE (arg1);
	  quals1 = cp_type_quals (arg1);
	}

      if (TYPE_REF_P (arg2))
	{
	  ref2 = TYPE_REF_IS_RVALUE (arg2) + 1;
	  arg2 = TREE_TYPE (arg2);
	  quals2 = cp_type_quals (arg2);
	}

      /* Top-level cv-qualifiers are ignored for partial ordering.  */
      arg1 = TYPE_MAIN_VARIANT (arg1);
      arg2 = TYPE_MAIN_VARIANT (arg2);

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION)
	{
	  int i, len2 = remaining_arguments (args2);
	  tree parmvec = make_tree_vec (1);
	  tree argvec = make_tree_vec (len2);
	  tree ta = args2;

	  /* Setup the parameter vector, which contains only ARG1.  */
	  TREE_VEC_ELT (parmvec, 0) = arg1;

	  /* Setup the argument vector, which contains the remaining
	     arguments.  */
	  for (i = 0; i < len2; i++, ta = TREE_CHAIN (ta))
	    TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta);

	  deduce1 = (unify_pack_expansion (tparms1, targs1, parmvec,
					   argvec, DEDUCE_EXACT,
					   /*subr=*/true, /*explain_p=*/false)
		     == 0);

	  /* We cannot deduce in the other direction, because ARG1 is
	     a pack expansion but ARG2 is not.  */
	  deduce2 = 0;
	}
      else if (TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	{
	  int i, len1 = remaining_arguments (args1);
	  tree parmvec = make_tree_vec (1);
	  tree argvec = make_tree_vec (len1);
	  tree ta = args1;

	  /* Setup the parameter vector, which contains only ARG1.  */
	  TREE_VEC_ELT (parmvec, 0) = arg2;

	  /* Setup the argument vector, which contains the remaining
	     arguments.  */
	  for (i = 0; i < len1; i++, ta = TREE_CHAIN (ta))
	    TREE_VEC_ELT (argvec, i) = TREE_VALUE (ta);

	  deduce2 = (unify_pack_expansion (tparms2, targs2, parmvec,
					   argvec, DEDUCE_EXACT,
					   /*subr=*/true, /*explain_p=*/false)
		     == 0);

	  /* We cannot deduce in the other direction, because ARG2 is
	     a pack expansion but ARG1 is not.*/
	  deduce1 = 0;
	}
      else
	{
	  /* The normal case, where neither argument is a pack
	     expansion.  */
	  deduce1 = (unify (tparms1, targs1, arg1, arg2,
			    UNIFY_ALLOW_NONE, /*explain_p=*/false) == 0);
	  deduce2 = (unify (tparms2, targs2, arg2, arg1,
			    UNIFY_ALLOW_NONE, /*explain_p=*/false) == 0);
	}

      /* If we couldn't deduce arguments for tparms1 to make arg1 match
	 arg2, then arg2 is not as specialized as arg1.  */
      if (!deduce1)
	lose2 = true;
      if (!deduce2)
	lose1 = true;

      /* "If, for a given type, deduction succeeds in both directions
	 (i.e., the types are identical after the transformations above)
	 and both P and A were reference types (before being replaced
	 with the type referred to above):
	 - if the type from the argument template was an lvalue reference
	 and the type from the parameter template was not, the argument
	 type is considered to be more specialized than the other;
	 otherwise,
	 - if the type from the argument template is more cv-qualified
	 than the type from the parameter template (as described above),
	 the argument type is considered to be more specialized than
	 the other; otherwise,
	 - neither type is more specialized than the other."  */
      if (deduce1 && deduce2)
	{
	  if (ref1 && ref2 && ref1 != ref2)
	    {
	      /* Lvalue reference (1) beats rvalue reference (2).  */
	      if (ref1 > ref2)
		lose1 = true;
	      else
		lose2 = true;
	    }
	  else if (quals1 != quals2 && quals1 >= 0 && quals2 >= 0)
	    {
	      /* A type whose quals are a superset of the other's is
		 more specialized.  If each is a subset of the other
		 they are equal and both branches fire harmlessly.  */
	      if ((quals1 & quals2) == quals2)
		lose2 = true;
	      if ((quals1 & quals2) == quals1)
		lose1 = true;
	    }
	}

      if (lose1 && lose2)
	/* We've failed to deduce something in either direction.
	   These must be unordered.  */
	break;

      if (TREE_CODE (arg1) == TYPE_PACK_EXPANSION
	  || TREE_CODE (arg2) == TYPE_PACK_EXPANSION)
	/* We have already processed all of the arguments in our
	   handing of the pack expansion type.  */
	len = 0;

      args1 = TREE_CHAIN (args1);
      args2 = TREE_CHAIN (args2);
    }

  /* "In most cases, all template parameters must have values in order
     for deduction to succeed, but for partial ordering purposes a
     template parameter may remain without a value provided it is not
     used in the types being used for partial ordering."

     Thus, if we are missing any of the targs1 we need to substitute
     into origs1, then pat2 is not as specialized as pat1.  This can
     happen when there is a nondeduced context.  */
  if (!lose2 && check_undeduced_parms (targs1, origs1, args1))
    lose2 = true;
  if (!lose1 && check_undeduced_parms (targs2, origs2, args2))
    lose1 = true;

  processing_template_decl--;

  /* If both deductions succeed, the partial ordering selects the more
     constrained template.  */
  /* P2113: If the corresponding template-parameters of the
     template-parameter-lists are not equivalent ([temp.over.link]) or
     if the function parameters that positionally correspond between
     the two templates are not of the same type, neither template is
     more specialized than the other.  */
  if (!lose1 && !lose2
      && comp_template_parms (DECL_TEMPLATE_PARMS (pat1),
			      DECL_TEMPLATE_PARMS (pat2))
      && compparms (origs1, origs2))
    {
      int winner = more_constrained (decl1, decl2);
      if (winner > 0)
	lose2 = true;
      else if (winner < 0)
	lose1 = true;
    }

  /* All things being equal, if the next argument is a pack expansion
     for one function but not for the other, prefer the
     non-variadic function.  FIXME this is bogus; see c++/41958.  */
  if (lose1 == lose2
      && args1 && TREE_VALUE (args1)
      && args2 && TREE_VALUE (args2))
    {
      lose1 = TREE_CODE (TREE_VALUE (args1)) == TYPE_PACK_EXPANSION;
      lose2 = TREE_CODE (TREE_VALUE (args2)) == TYPE_PACK_EXPANSION;
    }

  if (lose1 == lose2)
    return 0;
  else if (!lose1)
    return 1;
  else
    return -1;
}

/* Determine which of two partial specializations of TMPL is more
   specialized.

   PAT1 is a TREE_LIST whose TREE_VALUE is the TEMPLATE_DECL corresponding
   to the first partial specialization.  The TREE_PURPOSE is the
   innermost set of template parameters for the partial
   specialization.  PAT2 is similar, but for the second template.

   Return 1 if the first partial specialization is more specialized;
   -1 if the second is more specialized; 0 if neither is more
   specialized.

   See [temp.class.order] for information about determining which of
   two templates is more specialized.  */

static int
more_specialized_partial_spec (tree tmpl, tree pat1, tree pat2)
{
  tree targs;
  int winner = 0;
  bool any_deductions = false;

  tree tmpl1 = TREE_VALUE (pat1);
  tree tmpl2 = TREE_VALUE (pat2);
  tree specargs1 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl1)));
  tree specargs2 = TI_ARGS (get_template_info (DECL_TEMPLATE_RESULT (tmpl2)));

  /* Just like what happens for functions, if we are ordering between
     different template specializations, we may encounter dependent
     types in the arguments, and we need our dependency check functions
     to behave correctly.  */
  ++processing_template_decl;
  /* Deduce in each direction; winner < 0 favors PAT1, > 0 favors PAT2
     being deducible from the other (the sign convention matches the
     final return: 1 means PAT1 is more specialized).  */
  targs = get_partial_spec_bindings (tmpl, tmpl1, specargs2);
  if (targs)
    {
      --winner;
      any_deductions = true;
    }

  targs = get_partial_spec_bindings (tmpl, tmpl2, specargs1);
  if (targs)
    {
      ++winner;
      any_deductions = true;
    }
  --processing_template_decl;

  /* If both deductions succeed, the partial ordering selects the more
     constrained template.  */
  if (!winner && any_deductions)
    winner = more_constrained (tmpl1, tmpl2);

  /* In the case of a tie where at least one of the templates
     has a parameter pack at the end, the template with the most
     non-packed parameters wins.  */
  if (winner == 0
      && any_deductions
      && (template_args_variadic_p (TREE_PURPOSE (pat1))
	  || template_args_variadic_p (TREE_PURPOSE (pat2))))
    {
      tree args1 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat1));
      tree args2 = INNERMOST_TEMPLATE_ARGS (TREE_PURPOSE (pat2));
      int len1 = TREE_VEC_LENGTH (args1);
      int len2 = TREE_VEC_LENGTH (args2);

      /* We don't count the pack expansion at the end.  */
      if (template_args_variadic_p (TREE_PURPOSE (pat1)))
	--len1;
      if (template_args_variadic_p (TREE_PURPOSE (pat2)))
	--len2;

      if (len1 > len2)
	return 1;
      else if (len1 < len2)
	return -1;
    }

  return winner;
}

/* Return the template arguments that will produce the function signature
   DECL from the function template FN, with the explicit template
   arguments EXPLICIT_ARGS.  If CHECK_RETTYPE is true, the return type must
   also match.  Return NULL_TREE if no satisfactory arguments could be
   found.  */

static tree
get_bindings (tree fn, tree decl, tree explicit_args, bool check_rettype)
{
  int ntparms = DECL_NTPARMS (fn);
  tree targs = make_tree_vec (ntparms);
  tree decl_type = TREE_TYPE (decl);
  tree decl_arg_types;
  tree *args;
  unsigned int nargs, ix;
  tree arg;

  gcc_assert (decl != DECL_TEMPLATE_RESULT (fn));

  /* Never do unification on the 'this' parameter.  */
  decl_arg_types = skip_artificial_parms_for (decl,
					      TYPE_ARG_TYPES (decl_type));

  /* Flatten DECL's parameter-type list into a stack-allocated array
     for fn_type_unification; stop at the void_list_node terminator.  */
  nargs = list_length (decl_arg_types);
  args = XALLOCAVEC (tree, nargs);
  for (arg = decl_arg_types, ix = 0;
       arg != NULL_TREE && arg != void_list_node;
       arg = TREE_CHAIN (arg), ++ix)
    args[ix] = TREE_VALUE (arg);
  nargs = i;

  if (fn_type_unification (fn, explicit_args, targs,
			   args, nargs, 1, DEDUCE_EXACT,
			   NULL, explain_p))
    return 1;

  return targs;
}
// Returns 1 if T1 is more specialized than T2, -1 if T2 is more
// specialized than T1, and 0 if neither (or if they are equally
// constrained).
static int
more_specialized_inst (tree t1, tree t2)
{
  int fate = 0;
  int count = 0;

  if (get_bindings (t1, DECL_TEMPLATE_RESULT (t2), NULL_TREE, true))
    {
      --fate;
      ++count;
    }

  if (get_bindings (t2, DECL_TEMPLATE_RESULT (t1), NULL_TREE, true))
    {
      ++fate;
      ++count;
    }

  // If both deductions succeed, then one may be more constrained.
  if (count == 2 && fate == 0)
    fate = more_constrained (t1, t2);

  return fate;
}

/* TEMPLATES is a TREE_LIST.  Each TREE_VALUE is a TEMPLATE_DECL.
   Return the TREE_LIST node with the most specialized template, if
   any.  If there is no most specialized template, the error_mark_node
   is returned.

   Note that this function does not look at, or modify, the
   TREE_PURPOSE or TREE_TYPE of any of the nodes.  Since the node
   returned is one of the elements of INSTANTIATIONS, callers may
   store information in the TREE_PURPOSE or TREE_TYPE of the nodes,
   and retrieve it from the value returned.  */

tree
most_specialized_instantiation (tree templates)
{
  tree fn, champ;

  ++processing_template_decl;

  /* Single forward pass: CHAMP is the best candidate seen so far.  */
  champ = templates;
  for (fn = TREE_CHAIN (templates); fn; fn = TREE_CHAIN (fn))
    {
      gcc_assert (TREE_VALUE (champ) != TREE_VALUE (fn));
      int fate = more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn));
      if (fate == -1)
	champ = fn;
      else if (!fate)
	{
	  /* Equally specialized, move to next function.  If there
	     is no next function, nothing's most specialized.  */
	  fn = TREE_CHAIN (fn);
	  champ = fn;
	  if (!fn)
	    break;
	}
    }

  if (champ)
    /* Now verify that champ is better than everything earlier in the
       instantiation list.  */
    for (fn = templates; fn != champ; fn = TREE_CHAIN (fn))
      {
	if (more_specialized_inst (TREE_VALUE (champ), TREE_VALUE (fn)) != 1)
	  {
	    champ = NULL_TREE;
	    break;
	  }
      }

  processing_template_decl--;

  if (!champ)
    return error_mark_node;

  return champ;
}

/* If DECL is a specialization of some template, return the most
   general such template.  Otherwise, returns NULL_TREE.
   For example, given:

     template <class T> struct S { template <class U> void f(U); };

   if TMPL is `template <class U> void S<int>::f(U)' this will return
   the full template.  This function will not trace past partial
   specializations, however.  For example, given in addition:

     template <class T> struct S<T*> { template <class U> void f(U); };

   if TMPL is `template <class U> void S<int*>::f(U)' this will return
   `template <class T> template <class U> S<T*>::f(U)'.  */

tree
most_general_template (tree decl)
{
  /* Normalize DECL to a TEMPLATE_DECL before walking outward.  */
  if (TREE_CODE (decl) != TEMPLATE_DECL)
    {
      if (tree tinfo = get_template_info (decl))
	decl = TI_TEMPLATE (tinfo);
      /* The TI_TEMPLATE can be an IDENTIFIER_NODE for a
	 template friend, or a FIELD_DECL for a capture pack.  */
      if (TREE_CODE (decl) != TEMPLATE_DECL)
	return NULL_TREE;
    }

  /* Look for more and more general templates.  */
  while (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl))
    {
      /* The DECL_TI_TEMPLATE can be an IDENTIFIER_NODE in some cases.
	 (See cp-tree.h for details.)  */
      if (TREE_CODE (DECL_TI_TEMPLATE (decl)) != TEMPLATE_DECL)
	break;

      /* Stop at a partial specialization (unless it is an alias).  */
      if (CLASS_TYPE_P (TREE_TYPE (decl))
	  && !TYPE_DECL_ALIAS_P (TYPE_NAME (TREE_TYPE (decl)))
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (decl)))
	break;

      /* Stop if we run into an explicitly specialized class template.  */
      if (!DECL_NAMESPACE_SCOPE_P (decl)
	  && DECL_CONTEXT (decl)
	  && CLASSTYPE_TEMPLATE_SPECIALIZATION (DECL_CONTEXT (decl)))
	break;

      decl = DECL_TI_TEMPLATE (decl);
    }

  return decl;
}

/* Return the most specialized of the template partial specializations
   which can produce TARGET, a specialization of some class or variable
   template.  The value returned is actually a TREE_LIST; the TREE_VALUE
   is a TEMPLATE_DECL node corresponding to the partial specialization,
   while the TREE_PURPOSE is the set of template arguments that must be
   substituted into the template pattern in order to generate TARGET.

   If the choice of partial specialization is ambiguous, a diagnostic is
   issued, and the error_mark_node is returned.
   If there are no partial specializations matching TARGET, then
   NULL_TREE is returned, indicating that the primary template should be
   used.  */

tree
most_specialized_partial_spec (tree target, tsubst_flags_t complain)
{
  tree list = NULL_TREE;
  tree t;
  tree champ;
  int fate;
  bool ambiguous_p;
  tree outer_args = NULL_TREE;
  tree tmpl, args;

  /* Extract the template and its arguments from TARGET, which may be
     a class type, a TEMPLATE_ID_EXPR, or a variable.  */
  if (TYPE_P (target))
    {
      tree tinfo = CLASSTYPE_TEMPLATE_INFO (target);
      tmpl = TI_TEMPLATE (tinfo);
      args = TI_ARGS (tinfo);
    }
  else if (TREE_CODE (target) == TEMPLATE_ID_EXPR)
    {
      tmpl = TREE_OPERAND (target, 0);
      args = TREE_OPERAND (target, 1);
    }
  else if (VAR_P (target))
    {
      tree tinfo = DECL_TEMPLATE_INFO (target);
      tmpl = TI_TEMPLATE (tinfo);
      args = TI_ARGS (tinfo);
    }
  else
    gcc_unreachable ();

  tree main_tmpl = most_general_template (tmpl);

  /* For determining which partial specialization to use, only the
     innermost args are interesting.  */
  if (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (args))
    {
      outer_args = strip_innermost_template_args (args, 1);
      args = INNERMOST_TEMPLATE_ARGS (args);
    }

  /* The caller hasn't called push_to_top_level yet, but we need
     get_partial_spec_bindings to be done in non-template context so that
     we'll fully resolve everything.  */
  processing_template_decl_sentinel ptds;

  /* Collect every partial specialization whose arguments can be
     deduced from ARGS (and whose constraints, if any, hold).  */
  for (t = DECL_TEMPLATE_SPECIALIZATIONS (main_tmpl); t; t = TREE_CHAIN (t))
    {
      const tree ospec_tmpl = TREE_VALUE (t);

      tree spec_tmpl;
      if (outer_args)
	{
	  /* Substitute in the template args from the enclosing class.  */
	  ++processing_template_decl;
	  spec_tmpl = tsubst (ospec_tmpl, outer_args, tf_none, NULL_TREE);
	  --processing_template_decl;
	  if (spec_tmpl == error_mark_node)
	    return error_mark_node;
	}
      else
	spec_tmpl = ospec_tmpl;

      tree spec_args = get_partial_spec_bindings (tmpl, spec_tmpl, args);
      if (spec_args)
	{
	  if (outer_args)
	    spec_args = add_to_template_args (outer_args, spec_args);

	  /* Keep the candidate only if the constraints are satisfied,
	     or if we're not compiling with concepts.  */
	  if (!flag_concepts
	      || constraints_satisfied_p (ospec_tmpl, spec_args))
	    {
	      list = tree_cons (spec_args, ospec_tmpl, list);
	      TREE_TYPE (list) = TREE_TYPE (t);
	    }
	}
    }

  if (! list)
    return NULL_TREE;

  /* Tournament pass: find a candidate CHAMP no later element beats.  */
  ambiguous_p = false;
  t = list;
  champ = t;
  t = TREE_CHAIN (t);
  for (; t; t = TREE_CHAIN (t))
    {
      fate = more_specialized_partial_spec (tmpl, champ, t);
      if (fate == 1)
	;
      else
	{
	  if (fate == 0)
	    {
	      /* Tie: skip both; if nothing is left, it's ambiguous.  */
	      t = TREE_CHAIN (t);
	      if (! t)
		{
		  ambiguous_p = true;
		  break;
		}
	    }
	  champ = t;
	}
    }

  /* Verify CHAMP against every candidate that preceded it.  */
  if (!ambiguous_p)
    for (t = list; t && t != champ; t = TREE_CHAIN (t))
      {
	fate = more_specialized_partial_spec (tmpl, champ, t);
	if (fate != 1)
	  {
	    ambiguous_p = true;
	    break;
	  }
      }

  if (ambiguous_p)
    {
      const char *str;
      char *spaces = NULL;
      if (!(complain & tf_error))
	return error_mark_node;
      if (TYPE_P (target))
	error ("ambiguous template instantiation for %q#T", target);
      else
	error ("ambiguous template instantiation for %q#D", target);
      str = ngettext ("candidate is:", "candidates are:", list_length (list));
      for (t = list; t; t = TREE_CHAIN (t))
	{
	  tree subst = build_tree_list (TREE_VALUE (t), TREE_PURPOSE (t));
	  inform (DECL_SOURCE_LOCATION (TREE_VALUE (t)),
		  "%s %#qS", spaces ? spaces : str, subst);
	  /* Indent subsequent candidates to line up under the first.  */
	  spaces = spaces ? spaces : get_spaces (str);
	}
      free (spaces);
      return error_mark_node;
    }

  return champ;
}

/* Explicitly instantiate DECL.  STORAGE, if non-null, is the
   storage-class keyword tree (e.g. `extern') from the declaration.  */

void
do_decl_instantiation (tree decl, tree storage)
{
  tree result = NULL_TREE;
  int extern_p = 0;

  if (!decl || decl == error_mark_node)
    /* An error occurred, for which grokdeclarator has already issued
       an appropriate message.  */
    return;
  else if (! DECL_LANG_SPECIFIC (decl))
    {
      error ("explicit instantiation of non-template %q#D", decl);
      return;
    }
  else if (DECL_DECLARED_CONCEPT_P (decl))
    {
      if (VAR_P (decl))
	error ("explicit instantiation of variable concept %q#D", decl);
      else
	error ("explicit instantiation of function concept %q#D", decl);
      return;
    }

  bool var_templ = (DECL_TEMPLATE_INFO (decl)
		    && variable_template_p (DECL_TI_TEMPLATE (decl)));

  if (VAR_P (decl) && !var_templ)
    {
      /* There is an asymmetry here in the way VAR_DECLs and
	 FUNCTION_DECLs are handled by grokdeclarator.  In the case of
	 the latter, the DECL we get back will be marked as a
	 template instantiation, and the appropriate
	 DECL_TEMPLATE_INFO will be set up.  This does not happen for
	 VAR_DECLs so we do the lookup here.  Probably, grokdeclarator
	 should handle VAR_DECLs as it currently handles
	 FUNCTION_DECLs.  */
      if (!DECL_CLASS_SCOPE_P (decl))
	{
	  error ("%qD is not a static data member of a class template", decl);
	  return;
	}
      result = lookup_field (DECL_CONTEXT (decl), DECL_NAME (decl), 0, false);
      if (!result || !VAR_P (result))
	{
	  error ("no matching template for %qD found", decl);
	  return;
	}
      if (!same_type_p (TREE_TYPE (result), TREE_TYPE (decl)))
	{
	  error ("type %qT for explicit instantiation %qD does not match "
		 "declared type %qT", TREE_TYPE (result), decl,
		 TREE_TYPE (decl));
	  return;
	}
    }
  else if (TREE_CODE (decl) != FUNCTION_DECL && !var_templ)
    {
      error ("explicit instantiation of %q#D", decl);
      return;
    }
  else
    result = decl;

  /* Check for various error cases.  Note that if the explicit
     instantiation is valid the RESULT will currently be marked as an
     *implicit* instantiation; DECL_EXPLICIT_INSTANTIATION is not set
     until we get here.  */

  if (DECL_TEMPLATE_SPECIALIZATION (result))
    {
      /* DR 259 [temp.spec].

	 Both an explicit instantiation and a declaration of an explicit
	 specialization shall not appear in a program unless the
	 explicit instantiation follows a declaration of the explicit
	 specialization.

	 For a given set of template parameters, if an explicit
	 instantiation of a template appears after a declaration of an
	 explicit specialization for that template, the explicit
	 instantiation has no effect.  */
      return;
    }
  else if (DECL_EXPLICIT_INSTANTIATION (result))
    {
      /* [temp.spec]

	 No program shall explicitly instantiate any template more
	 than once.

	 We check DECL_NOT_REALLY_EXTERN so as not to complain when
	 the first instantiation was `extern' and the second is not,
	 and EXTERN_P for the opposite case.

	 NOTE(review): EXTERN_P is still 0 here — the storage keyword
	 is only examined further down; confirm this ordering is
	 intentional before relying on the EXTERN_P checks in this
	 branch.  */
      if (DECL_NOT_REALLY_EXTERN (result) && !extern_p)
	permerror (input_location,
		   "duplicate explicit instantiation of %q#D", result);
      /* If an "extern" explicit instantiation follows an ordinary
	 explicit instantiation, the template is instantiated.  */
      if (extern_p)
	return;
    }
  else if (!DECL_IMPLICIT_INSTANTIATION (result))
    {
      error ("no matching template for %qD found", result);
      return;
    }
  else if (!DECL_TEMPLATE_INFO (result))
    {
      permerror (input_location,
		 "explicit instantiation of non-template %q#D", result);
      return;
    }

  /* Interpret the storage-class keyword; only `extern' is valid.  */
  if (storage == NULL_TREE)
    ;
  else if (storage == ridpointers[(int) RID_EXTERN])
    {
      if (cxx_dialect == cxx98)
	pedwarn (input_location, OPT_Wpedantic,
		 "ISO C++ 1998 forbids the use of %<extern%> on explicit "
		 "instantiations");
      extern_p = 1;
    }
  else
    error ("storage class %qD applied to template instantiation", storage);

  check_explicit_instantiation_namespace (result);
  mark_decl_instantiated (result, extern_p);
  if (! extern_p)
    instantiate_decl (result, /*defer_ok=*/true,
		      /*expl_inst_class_mem_p=*/false);
}

/* Mark the class type T as explicitly instantiated.  EXTERN_P is
   nonzero for an `extern template' instantiation declaration, in which
   case T becomes interface-only and no type output is done here.  */

static void
mark_class_instantiated (tree t, int extern_p)
{
  SET_CLASSTYPE_EXPLICIT_INSTANTIATION (t);
  SET_CLASSTYPE_INTERFACE_KNOWN (t);
  CLASSTYPE_INTERFACE_ONLY (t) = extern_p;
  TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (t)) = extern_p;
  if (! extern_p)
    {
      CLASSTYPE_DEBUG_REQUESTED (t) = 1;
      rest_of_type_compilation (t, 1);
    }
}

/* Called from do_type_instantiation through binding_table_foreach to
   do recursive instantiation for the type bound in ENTRY.
*/

static void
bt_instantiate_type_proc (binding_entry entry, void *data)
{
  tree storage = *(tree *) data;

  /* Recurse only into class types that are themselves template
     instantiations with fully non-dependent arguments.  */
  if (MAYBE_CLASS_TYPE_P (entry->type)
      && CLASSTYPE_TEMPLATE_INFO (entry->type)
      && !uses_template_parms (CLASSTYPE_TI_ARGS (entry->type)))
    do_type_instantiation (TYPE_MAIN_DECL (entry->type), storage, 0);
}

/* Perform an explicit instantiation of template class T.  STORAGE, if
   non-null, is the RID for extern, inline or static.  COMPLAIN is
   nonzero if this is called from the parser, zero if called recursively,
   since the standard is unclear (as detailed below).  */

void
do_type_instantiation (tree t, tree storage, tsubst_flags_t complain)
{
  int extern_p = 0;
  int nomem_p = 0;
  int static_p = 0;
  int previous_instantiation_extern_p = 0;

  if (TREE_CODE (t) == TYPE_DECL)
    t = TREE_TYPE (t);

  if (! CLASS_TYPE_P (t) || ! CLASSTYPE_TEMPLATE_INFO (t))
    {
      tree tmpl = (TYPE_TEMPLATE_INFO (t)) ? TYPE_TI_TEMPLATE (t) : NULL;
      if (tmpl)
	error ("explicit instantiation of non-class template %qD", tmpl);
      else
	error ("explicit instantiation of non-template type %qT", t);
      return;
    }

  complete_type (t);

  if (!COMPLETE_TYPE_P (t))
    {
      if (complain & tf_error)
	error ("explicit instantiation of %q#T before definition of template",
	       t);
      return;
    }

  /* Interpret the storage keyword; at most one of NOMEM_P, EXTERN_P
     and STATIC_P becomes set.  */
  if (storage != NULL_TREE)
    {
      if (storage == ridpointers[(int) RID_EXTERN])
	{
	  if (cxx_dialect == cxx98)
	    pedwarn (input_location, OPT_Wpedantic,
		     "ISO C++ 1998 forbids the use of %<extern%> on "
		     "explicit instantiations");
	}
      else
	pedwarn (input_location, OPT_Wpedantic,
		 "ISO C++ forbids the use of %qE"
		 " on explicit instantiations", storage);

      if (storage == ridpointers[(int) RID_INLINE])
	nomem_p = 1;
      else if (storage == ridpointers[(int) RID_EXTERN])
	extern_p = 1;
      else if (storage == ridpointers[(int) RID_STATIC])
	static_p = 1;
      else
	{
	  error ("storage class %qD applied to template instantiation",
		 storage);
	  extern_p = 0;
	}
    }

  if (CLASSTYPE_TEMPLATE_SPECIALIZATION (t))
    {
      /* DR 259 [temp.spec].

	 Both an explicit instantiation and a declaration of an explicit
	 specialization shall not appear in a program unless the
	 explicit instantiation follows a declaration of the explicit
	 specialization.

	 For a given set of template parameters, if an explicit
	 instantiation of a template appears after a declaration of an
	 explicit specialization for that template, the explicit
	 instantiation has no effect.  */
      return;
    }
  else if (CLASSTYPE_EXPLICIT_INSTANTIATION (t))
    {
      /* [temp.spec]

	 No program shall explicitly instantiate any template more
	 than once.

	 If PREVIOUS_INSTANTIATION_EXTERN_P, then the first explicit
	 instantiation was `extern'.  If EXTERN_P then the second is.
	 These cases are OK.  */
      previous_instantiation_extern_p = CLASSTYPE_INTERFACE_ONLY (t);

      if (!previous_instantiation_extern_p && !extern_p
	  && (complain & tf_error))
	permerror (input_location,
		   "duplicate explicit instantiation of %q#T", t);

      /* If we've already instantiated the template, just return now.  */
      if (!CLASSTYPE_INTERFACE_ONLY (t))
	return;
    }

  check_explicit_instantiation_namespace (TYPE_NAME (t));
  mark_class_instantiated (t, extern_p);

  /* `inline' explicit instantiation: mark only, emit nothing.  */
  if (nomem_p)
    return;

  /* In contrast to implicit instantiation, where only the
     declarations, and not the definitions, of members are
     instantiated, we have here:

	 [temp.explicit]

	 An explicit instantiation that names a class template
	 specialization is also an explicit instantiation of the same
	 kind (declaration or definition) of each of its members (not
	 including members inherited from base classes and members
	 that are templates) that has not been previously explicitly
	 specialized in the translation unit containing the explicit
	 instantiation, provided that the associated constraints, if
	 any, of that member are satisfied by the template arguments
	 of the explicit instantiation.  */
  for (tree fld = TYPE_FIELDS (t); fld; fld = DECL_CHAIN (fld))
    if ((VAR_P (fld)
	 || (TREE_CODE (fld) == FUNCTION_DECL
	     && !static_p
	     && user_provided_p (fld)))
	&& DECL_TEMPLATE_INSTANTIATION (fld)
	&& constraints_satisfied_p (fld))
      {
	mark_decl_instantiated (fld, extern_p);
	if (! extern_p)
	  instantiate_decl (fld, /*defer_ok=*/true,
			    /*expl_inst_class_mem_p=*/true);
      }

  /* Recursively instantiate nested class types.  */
  if (CLASSTYPE_NESTED_UTDS (t))
    binding_table_foreach (CLASSTYPE_NESTED_UTDS (t),
			   bt_instantiate_type_proc, &storage);
}

/* Given a function DECL, which is a specialization of TMPL, modify
   DECL to be a re-instantiation of TMPL with the same template
   arguments.  TMPL should be the template into which tsubst'ing
   should occur for DECL, not the most general template.

   One reason for doing this is a scenario like this:

     template <class T>
     void f(const T&, int i);

     void g() { f(3, 7); }

     template <class T>
     void f(const T& t, const int i) { }

   Note that when the template is first instantiated, with
   instantiate_template, the resulting DECL will have no name for the
   first parameter, and the wrong type for the second.  So, when we go
   to instantiate the DECL, we regenerate it.  */

static void
regenerate_decl_from_template (tree decl, tree tmpl, tree args)
{
  /* The arguments used to instantiate DECL, from the most general
     template.  */
  tree code_pattern;

  code_pattern = DECL_TEMPLATE_RESULT (tmpl);

  /* Make sure that we can see identifiers, and compute access
     correctly.  */
  push_access_scope (decl);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      tree decl_parm;
      tree pattern_parm;
      tree specs;
      int args_depth;
      int parms_depth;

      /* Use only as many levels of ARGS as TMPL has parameter levels.  */
      args_depth = TMPL_ARGS_DEPTH (args);
      parms_depth = TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (tmpl));
      if (args_depth > parms_depth)
	args = get_innermost_template_args (args, parms_depth);

      /* Instantiate a dynamic exception-specification.  noexcept will be
	 handled below.  */
      if (tree raises = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (code_pattern)))
	if (TREE_VALUE (raises))
	  {
	    specs = tsubst_exception_specification (TREE_TYPE (code_pattern),
						    args, tf_error, NULL_TREE,
						    /*defer_ok*/false);
	    if (specs && specs != error_mark_node)
	      TREE_TYPE (decl) = build_exception_variant (TREE_TYPE (decl),
							  specs);
	  }

      /* Merge parameter declarations.  */
      decl_parm = skip_artificial_parms_for (decl, DECL_ARGUMENTS (decl));
      pattern_parm
	= skip_artificial_parms_for (code_pattern,
				     DECL_ARGUMENTS (code_pattern));
      while (decl_parm && !DECL_PACK_P (pattern_parm))
	{
	  tree parm_type;
	  tree attributes;

	  if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm))
	    DECL_NAME (decl_parm) = DECL_NAME (pattern_parm);
	  parm_type = tsubst (TREE_TYPE (pattern_parm), args, tf_error,
			      NULL_TREE);
	  parm_type = type_decays_to (parm_type);
	  if (!same_type_p (TREE_TYPE (decl_parm), parm_type))
	    TREE_TYPE (decl_parm) = parm_type;
	  attributes = DECL_ATTRIBUTES (pattern_parm);
	  if (DECL_ATTRIBUTES (decl_parm) != attributes)
	    {
	      DECL_ATTRIBUTES (decl_parm) = attributes;
	      cplus_decl_attributes (&decl_parm, attributes, /*flags=*/0);
	    }
	  decl_parm = DECL_CHAIN (decl_parm);
	  pattern_parm = DECL_CHAIN (pattern_parm);
	}
      /* Merge any parameters that match with the function parameter
	 pack.  */
      if (pattern_parm && DECL_PACK_P (pattern_parm))
	{
	  int i, len;
	  tree expanded_types;
	  /* Expand the TYPE_PACK_EXPANSION that provides the types for
	     the parameters in this function parameter pack.  */
	  expanded_types = tsubst_pack_expansion (TREE_TYPE (pattern_parm),
						  args, tf_error, NULL_TREE);
	  len = TREE_VEC_LENGTH (expanded_types);
	  for (i = 0; i < len; i++)
	    {
	      tree parm_type;
	      tree attributes;

	      if (DECL_NAME (decl_parm) != DECL_NAME (pattern_parm))
		/* Rename the parameter to include the index.  */
		DECL_NAME (decl_parm)
		  = make_ith_pack_parameter_name (DECL_NAME (pattern_parm), i);
	      parm_type = TREE_VEC_ELT (expanded_types, i);
	      parm_type = type_decays_to (parm_type);
	      if (!same_type_p (TREE_TYPE (decl_parm), parm_type))
		TREE_TYPE (decl_parm) = parm_type;
	      attributes = DECL_ATTRIBUTES (pattern_parm);
	      if (DECL_ATTRIBUTES (decl_parm) != attributes)
		{
		  DECL_ATTRIBUTES (decl_parm) = attributes;
		  cplus_decl_attributes (&decl_parm, attributes, /*flags=*/0);
		}
	      decl_parm = DECL_CHAIN (decl_parm);
	    }
	}
      /* Merge additional specifiers from the CODE_PATTERN.  */
      if (DECL_DECLARED_INLINE_P (code_pattern)
	  && !DECL_DECLARED_INLINE_P (decl))
	DECL_DECLARED_INLINE_P (decl) = 1;

      maybe_instantiate_noexcept (decl, tf_error);
    }
  else if (VAR_P (decl))
    {
      start_lambda_scope (decl);
      DECL_INITIAL (decl)
	= tsubst_init (DECL_INITIAL (code_pattern), decl, args,
		       tf_error, DECL_TI_TEMPLATE (decl));
      finish_lambda_scope ();
      if (VAR_HAD_UNKNOWN_BOUND (decl))
	TREE_TYPE (decl) = tsubst (TREE_TYPE (code_pattern), args,
				   tf_error, DECL_TI_TEMPLATE (decl));
    }
  else
    gcc_unreachable ();

  pop_access_scope (decl);
}

/* Return the TEMPLATE_DECL into which DECL_TI_ARGS(DECL) should be
   substituted to get DECL.  */

tree
template_for_substitution (tree decl)
{
  tree tmpl = DECL_TI_TEMPLATE (decl);

  /* Set TMPL to the template whose DECL_TEMPLATE_RESULT is the pattern
     for the instantiation.  This is not always the most general
     template.  Consider, for example:

	template <class T>
	struct S { template <class U> void f();
		   template <> void f<int>(); };

     and an instantiation of S<double>::f<int>.  We want TD to be the
     specialization S<T>::f<int>, not the more general S<T>::f<U>.  */

  while (/* An instantiation cannot have a definition, so we need a
	    more general template.  */
	 DECL_TEMPLATE_INSTANTIATION (tmpl)
	 /* We must also deal with friend templates.  Given:

	      template <class T> struct S {
		template <class U> friend void f() {};
	      };

	    S<int>::f<U> say, is not an instantiation of S<T>::f<U>,
	    so far as the language is concerned, but that's still
	    where we get the pattern for the instantiation from.  On
	    other hand, if the definition comes outside the class, say:

	      template <class T> struct S {
		template <class U> friend void f();
	      };
	      template <class U> friend void f() {}

	    we don't need to look any further.  That's what the check for
	    DECL_INITIAL is for.  */
	 || (TREE_CODE (decl) == FUNCTION_DECL
	     && DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (tmpl)
	     && !DECL_INITIAL (DECL_TEMPLATE_RESULT (tmpl))))
    {
      /* The present template, TD, should not be a definition.  If it
	 were a definition, we should be using it!  Note that we
	 cannot restructure the loop to just keep going until we find
	 a template with a definition, since that might go too far if
	 a specialization was declared, but not defined.  */

      /* Fetch the more general template.  */
      tmpl = DECL_TI_TEMPLATE (tmpl);
    }

  return tmpl;
}

/* Returns true if we need to instantiate this template instance even if we
   know we aren't going to emit it.  */

bool
always_instantiate_p (tree decl)
{
  /* We always instantiate inline functions so that we can inline them.  An
     explicit instantiation declaration prohibits implicit instantiation of
     non-inline functions.  With high levels of optimization, we would
     normally inline non-inline functions -- but we're not allowed to do
     that for "extern template" functions.  Therefore, we check
     DECL_DECLARED_INLINE_P, rather than possibly_inlined_p.  */
  return ((TREE_CODE (decl) == FUNCTION_DECL
	   && (DECL_DECLARED_INLINE_P (decl)
	       || type_uses_auto (TREE_TYPE (TREE_TYPE (decl)))))
	  /* And we need to instantiate static data members so that
	     their initializers are available in integral constant
	     expressions.  */
	  || (VAR_P (decl)
	      && decl_maybe_constant_var_p (decl)));
}

/* If FN has a noexcept-specifier that hasn't been instantiated yet,
   instantiate it now, modifying TREE_TYPE (fn).
   Returns false on error, true otherwise.  */

bool
maybe_instantiate_noexcept (tree fn, tsubst_flags_t complain)
{
  tree fntype, spec, noex;

  /* Don't instantiate a noexcept-specification from template context.  */
  if (processing_template_decl
      && (!flag_noexcept_type || type_dependent_expression_p (fn)))
    return true;

  if (DECL_MAYBE_DELETED (fn))
    {
      if (fn == current_function_decl)
	/* We're in start_preparsed_function, keep going.  */
	return true;

      ++function_depth;
      synthesize_method (fn);
      --function_depth;
      return !DECL_MAYBE_DELETED (fn);
    }

  fntype = TREE_TYPE (fn);
  spec = TYPE_RAISES_EXCEPTIONS (fntype);

  /* Nothing to do unless the spec is a deferred one.  */
  if (!spec || !TREE_PURPOSE (spec))
    return true;

  noex = TREE_PURPOSE (spec);
  if (TREE_CODE (noex) != DEFERRED_NOEXCEPT
      && TREE_CODE (noex) != DEFERRED_PARSE)
    return true;

  tree orig_fn = NULL_TREE;
  /* For a member friend template we can get a TEMPLATE_DECL.  Let's use
     its FUNCTION_DECL for the rest of this function -- push_access_scope
     doesn't accept TEMPLATE_DECLs.  */
  if (DECL_FUNCTION_TEMPLATE_P (fn))
    {
      orig_fn = fn;
      fn = DECL_TEMPLATE_RESULT (fn);
    }

  if (DECL_CLONED_FUNCTION_P (fn))
    {
      /* Clones share the spec of the function they were cloned from.  */
      tree prime = DECL_CLONED_FUNCTION (fn);
      if (!maybe_instantiate_noexcept (prime, complain))
	return false;
      spec = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (prime));
    }
  else if (TREE_CODE (noex) == DEFERRED_NOEXCEPT)
    {
      /* FNS tracks functions whose noexcept-specifier is currently
	 being instantiated, to detect self-dependence.  */
      static hash_set<tree>* fns = new hash_set<tree>;
      bool added = false;
      if (DEFERRED_NOEXCEPT_PATTERN (noex) == NULL_TREE)
	{
	  spec = get_defaulted_eh_spec (fn, complain);
	  if (spec == error_mark_node)
	    /* This might have failed because of an unparsed DMI, so
	       let's try again later.  */
	    return false;
	}
      else if (!(added = !fns->add (fn)))
	{
	  /* If hash_set::add returns true, the element was already there.  */
	  location_t loc = cp_expr_loc_or_loc (DEFERRED_NOEXCEPT_PATTERN (noex),
					       DECL_SOURCE_LOCATION (fn));
	  error_at (loc,
		    "exception specification of %qD depends on itself",
		    fn);
	  spec = noexcept_false_spec;
	}
      else if (push_tinst_level (fn))
	{
	  push_to_top_level ();
	  push_access_scope (fn);
	  push_deferring_access_checks (dk_no_deferred);
	  input_location = DECL_SOURCE_LOCATION (fn);

	  if (!DECL_LOCAL_DECL_P (fn))
	    {
	      /* If needed, set current_class_ptr for the benefit of
		 tsubst_copy/PARM_DECL.  The exception pattern will
		 refer to the parm of the template, not the
		 instantiation.  */
	      tree tdecl = DECL_TEMPLATE_RESULT (DECL_TI_TEMPLATE (fn));
	      if (DECL_NONSTATIC_MEMBER_FUNCTION_P (tdecl))
		{
		  tree this_parm = DECL_ARGUMENTS (tdecl);
		  current_class_ptr = NULL_TREE;
		  current_class_ref = cp_build_fold_indirect_ref (this_parm);
		  current_class_ptr = this_parm;
		}
	    }

	  /* If this function is represented by a TEMPLATE_DECL, then
	     the deferred noexcept-specification might still contain
	     dependent types, even after substitution.  And we need the
	     dependency check functions to work in build_noexcept_spec.  */
	  if (orig_fn)
	    ++processing_template_decl;

	  /* Do deferred instantiation of the noexcept-specifier.  */
	  noex = tsubst_copy_and_build (DEFERRED_NOEXCEPT_PATTERN (noex),
					DEFERRED_NOEXCEPT_ARGS (noex),
					tf_warning_or_error, fn,
					/*function_p=*/false,
					/*i_c_e_p=*/true);

	  /* Build up the noexcept-specification.  */
	  spec = build_noexcept_spec (noex, tf_warning_or_error);

	  if (orig_fn)
	    --processing_template_decl;

	  pop_deferring_access_checks ();
	  pop_access_scope (fn);
	  pop_tinst_level ();
	  pop_from_top_level ();
	}
      else
	spec = noexcept_false_spec;

      if (added)
	fns->remove (fn);
    }

  if (spec == error_mark_node)
    {
      /* This failed with a hard error, so let's go with false.  */
      gcc_assert (seen_error ());
      spec = noexcept_false_spec;
    }

  TREE_TYPE (fn) = build_exception_variant (fntype, spec);
  if (orig_fn)
    TREE_TYPE (orig_fn) = TREE_TYPE (fn);

  return true;
}

/* We're starting to process the function INST, an instantiation of
   PATTERN; add their parameters to local_specializations.  */

static void
register_parameter_specializations (tree pattern, tree inst)
{
  tree tmpl_parm = DECL_ARGUMENTS (pattern);
  tree spec_parm = DECL_ARGUMENTS (inst);
  if (DECL_NONSTATIC_MEMBER_FUNCTION_P (inst))
    {
      /* Map `this' first, then skip past the artificial parms.  */
      register_local_specialization (spec_parm, tmpl_parm);
      spec_parm = skip_artificial_parms_for (inst, spec_parm);
      tmpl_parm = skip_artificial_parms_for (pattern, tmpl_parm);
    }
  for (; tmpl_parm; tmpl_parm = DECL_CHAIN (tmpl_parm))
    {
      if (!DECL_PACK_P (tmpl_parm)
	  || (spec_parm && DECL_PACK_P (spec_parm)))
	{
	  register_local_specialization (spec_parm, tmpl_parm);
	  spec_parm = DECL_CHAIN (spec_parm);
	}
      else
	{
	  /* Register the (value) argument pack as a specialization of
	     TMPL_PARM, then move on.  */
	  tree argpack = extract_fnparm_pack (tmpl_parm, &spec_parm);
	  register_local_specialization (argpack, tmpl_parm);
	}
    }
  /* Every instantiated parameter must have been consumed.  */
  gcc_assert (!spec_parm);
}

/* Instantiate the body of D using PATTERN with ARGS.  We have already
   determined PATTERN is the correct template to use.
   NESTED_P is true if this is a nested function, in which case PATTERN
   will be a FUNCTION_DECL not a TEMPLATE_DECL.  */

static void
instantiate_body (tree pattern, tree args, tree d, bool nested_p)
{
  tree td = NULL_TREE;
  tree code_pattern = pattern;

  if (!nested_p)
    {
      td = pattern;
      code_pattern = DECL_TEMPLATE_RESULT (td);
    }
  else
    /* Only OMP reductions are nested.  */
    gcc_checking_assert (DECL_OMP_DECLARE_REDUCTION_P (code_pattern));

  vec<tree> omp_privatization_save;
  if (current_function_decl)
    save_omp_privatization_clauses (omp_privatization_save);

  bool push_to_top = !(current_function_decl
		       && !LAMBDA_FUNCTION_P (d)
		       && decl_function_context (d) == current_function_decl);

  if (push_to_top)
    push_to_top_level ();
  else
    {
      gcc_assert (!processing_template_decl);
      push_function_context ();
      cp_unevaluated_operand = 0;
      c_inhibit_evaluation_warnings = 0;
    }

  if (VAR_P (d))
    {
      /* The variable might be a lambda's extra scope, and that
	 lambda's visibility depends on D's.  */
      maybe_commonize_var (d);
      determine_visibility (d);
    }

  /* Mark D as instantiated so that recursive calls to
     instantiate_decl do not try to instantiate it again.  */
  DECL_TEMPLATE_INSTANTIATED (d) = 1;

  if (td)
    /* Regenerate the declaration in case the template has been modified
       by a subsequent redeclaration.  */
    regenerate_decl_from_template (d, td, args);

  /* We already set the file and line above.  Reset them now in case
     they changed as a result of calling regenerate_decl_from_template.  */
  input_location = DECL_SOURCE_LOCATION (d);

  if (VAR_P (d))
    {
      tree init;
      bool const_init = false;

      /* Clear out DECL_RTL; whatever was there before may not be right
	 since we've reset the type of the declaration.  */
      SET_DECL_RTL (d, NULL);
      DECL_IN_AGGR_P (d) = 0;

      /* The initializer is placed in DECL_INITIAL by
	 regenerate_decl_from_template so we don't need to
	 push/pop_access_scope again here.  Pull it out so that
	 cp_finish_decl can process it.  */
      init = DECL_INITIAL (d);
      DECL_INITIAL (d) = NULL_TREE;
      DECL_INITIALIZED_P (d) = 0;

      /* Clear DECL_EXTERNAL so that cp_finish_decl will process the
	 initializer.  That function will defer actual emission until
	 we have a chance to determine linkage.  */
      DECL_EXTERNAL (d) = 0;

      /* Enter the scope of D so that access-checking works correctly.  */
      bool enter_context = DECL_CLASS_SCOPE_P (d);
      if (enter_context)
	push_nested_class (DECL_CONTEXT (d));

      const_init = DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern);
      cp_finish_decl (d, init, const_init, NULL_TREE, 0);

      if (enter_context)
	pop_nested_class ();
    }
  else if (TREE_CODE (d) == FUNCTION_DECL && DECL_DEFAULTED_FN (code_pattern))
    synthesize_method (d);
  else if (TREE_CODE (d) == FUNCTION_DECL)
    {
      /* Set up the list of local specializations.  */
      local_specialization_stack lss (push_to_top ? lss_blank : lss_copy);
      tree block = NULL_TREE;

      /* Set up context.  */
      if (nested_p)
	block = push_stmt_list ();
      else
	start_preparsed_function (d, NULL_TREE, SF_PRE_PARSED);

      perform_instantiation_time_access_checks (code_pattern, args);

      /* Create substitution entries for the parameters.  */
      register_parameter_specializations (code_pattern, d);

      /* Substitute into the body of the function.  */
      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern))
	tsubst_omp_udr (DECL_SAVED_TREE (code_pattern), args,
			tf_warning_or_error, d);
      else
	{
	  tsubst_expr (DECL_SAVED_TREE (code_pattern), args,
		       tf_warning_or_error, DECL_TI_TEMPLATE (d),
		       /*integral_constant_expression_p=*/false);

	  /* Set the current input_location to the end of the function
	     so that finish_function knows where we are.  */
	  input_location
	    = DECL_STRUCT_FUNCTION (code_pattern)->function_end_locus;

	  /* Remember if we saw an infinite loop in the template.  */
	  current_function_infinite_loop
	    = DECL_STRUCT_FUNCTION (code_pattern)->language->infinite_loop;
	}

      /* Finish the function.  */
      if (nested_p)
	DECL_SAVED_TREE (d) = pop_stmt_list (block);
      else
	{
	  d = finish_function (/*inline_p=*/false);
	  expand_or_defer_fn (d);
	}

      if (DECL_OMP_DECLARE_REDUCTION_P (code_pattern))
	cp_check_omp_declare_reduction (d);
    }

  /* We're not deferring instantiation any more.  */
  TI_PENDING_TEMPLATE_FLAG (DECL_TEMPLATE_INFO (d)) = 0;

  if (push_to_top)
    pop_from_top_level ();
  else
    pop_function_context ();

  if (current_function_decl)
    restore_omp_privatization_clauses (omp_privatization_save);
}

/* Produce the definition of D, a _DECL generated from a template.  If
   DEFER_OK is true, then we don't have to actually do the
   instantiation now; we just have to do it sometime.  Normally it is
   an error if this is an explicit instantiation but D is undefined.
   EXPL_INST_CLASS_MEM_P is true iff D is a member of an explicitly
   instantiated class template.  */

tree
instantiate_decl (tree d, bool defer_ok, bool expl_inst_class_mem_p)
{
  tree tmpl = DECL_TI_TEMPLATE (d);
  tree gen_args;
  tree args;
  tree td;
  tree code_pattern;
  tree spec;
  tree gen_tmpl;
  bool pattern_defined;
  location_t saved_loc = input_location;
  int saved_unevaluated_operand = cp_unevaluated_operand;
  int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
  bool external_p;
  bool deleted_p;

  /* This function should only be used to instantiate templates for
     functions and static member variables.  */
  gcc_assert (VAR_OR_FUNCTION_DECL_P (d));

  /* A concept is never instantiated.  */
  gcc_assert (!DECL_DECLARED_CONCEPT_P (d));

  gcc_checking_assert (!DECL_FUNCTION_SCOPE_P (d));

  /* Variables are never deferred; if instantiation is required, they
     are instantiated right away.  That allows for better code in the
     case that an expression refers to the value of the variable --
     if the variable has a constant value the referring expression can
     take advantage of that fact.  */
  if (VAR_P (d))
    defer_ok = false;

  /* Don't instantiate cloned functions.  Instead, instantiate the
     functions they cloned.
*/ if (TREE_CODE (d) == FUNCTION_DECL && DECL_CLONED_FUNCTION_P (d)) d = DECL_CLONED_FUNCTION (d); if (DECL_TEMPLATE_INSTANTIATED (d) || TREE_TYPE (d) == error_mark_node || (TREE_CODE (d) == FUNCTION_DECL && DECL_DEFAULTED_FN (d) && DECL_INITIAL (d)) || DECL_TEMPLATE_SPECIALIZATION (d)) /* D has already been instantiated or explicitly specialized, so there's nothing for us to do here. It might seem reasonable to check whether or not D is an explicit instantiation, and, if so, stop here. But when an explicit instantiation is deferred until the end of the compilation, DECL_EXPLICIT_INSTANTIATION is set, even though we still need to do the instantiation. */ return d; /* Check to see whether we know that this template will be instantiated in some other file, as with "extern template" extension. */ external_p = (DECL_INTERFACE_KNOWN (d) && DECL_REALLY_EXTERN (d)); /* In general, we do not instantiate such templates. */ if (external_p && !always_instantiate_p (d)) return d; gen_tmpl = most_general_template (tmpl); gen_args = DECL_TI_ARGS (d); /* We should already have the extra args. */ gcc_checking_assert (tmpl == gen_tmpl || (TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (gen_tmpl)) == TMPL_ARGS_DEPTH (gen_args))); /* And what's in the hash table should match D. */ gcc_checking_assert ((spec = retrieve_specialization (gen_tmpl, gen_args, 0)) == d || spec == NULL_TREE); /* This needs to happen before any tsubsting. */ if (! push_tinst_level (d)) return d; timevar_push (TV_TEMPLATE_INST); /* Set TD to the template whose DECL_TEMPLATE_RESULT is the pattern for the instantiation. */ td = template_for_substitution (d); args = gen_args; if (VAR_P (d)) { /* Look up an explicit specialization, if any. 
*/ tree tid = lookup_template_variable (gen_tmpl, gen_args); tree elt = most_specialized_partial_spec (tid, tf_warning_or_error); if (elt && elt != error_mark_node) { td = TREE_VALUE (elt); args = TREE_PURPOSE (elt); } } code_pattern = DECL_TEMPLATE_RESULT (td); /* We should never be trying to instantiate a member of a class template or partial specialization. */ gcc_assert (d != code_pattern); if ((DECL_NAMESPACE_SCOPE_P (d) && !DECL_INITIALIZED_IN_CLASS_P (d)) || DECL_TEMPLATE_SPECIALIZATION (td)) /* In the case of a friend template whose definition is provided outside the class, we may have too many arguments. Drop the ones we don't need. The same is true for specializations. */ args = get_innermost_template_args (args, TMPL_PARMS_DEPTH (DECL_TEMPLATE_PARMS (td))); if (TREE_CODE (d) == FUNCTION_DECL) { deleted_p = DECL_DELETED_FN (code_pattern); pattern_defined = ((DECL_SAVED_TREE (code_pattern) != NULL_TREE && DECL_INITIAL (code_pattern) != error_mark_node) || DECL_DEFAULTED_FN (code_pattern) || deleted_p); } else { deleted_p = false; if (DECL_CLASS_SCOPE_P (code_pattern)) pattern_defined = ! DECL_IN_AGGR_P (code_pattern); else pattern_defined = ! DECL_EXTERNAL (code_pattern); } /* We may be in the middle of deferred access check. Disable it now. */ push_deferring_access_checks (dk_no_deferred); /* Unless an explicit instantiation directive has already determined the linkage of D, remember that a definition is available for this entity. */ if (pattern_defined && !DECL_INTERFACE_KNOWN (d) && !DECL_NOT_REALLY_EXTERN (d)) mark_definable (d); DECL_SOURCE_LOCATION (td) = DECL_SOURCE_LOCATION (code_pattern); DECL_SOURCE_LOCATION (d) = DECL_SOURCE_LOCATION (code_pattern); input_location = DECL_SOURCE_LOCATION (d); /* If D is a member of an explicitly instantiated class template, and no definition is available, treat it like an implicit instantiation. 
*/ if (!pattern_defined && expl_inst_class_mem_p && DECL_EXPLICIT_INSTANTIATION (d)) { /* Leave linkage flags alone on instantiations with anonymous visibility. */ if (TREE_PUBLIC (d)) { DECL_NOT_REALLY_EXTERN (d) = 0; DECL_INTERFACE_KNOWN (d) = 0; } SET_DECL_IMPLICIT_INSTANTIATION (d); } /* Defer all other templates, unless we have been explicitly forbidden from doing so. */ if (/* If there is no definition, we cannot instantiate the template. */ ! pattern_defined /* If it's OK to postpone instantiation, do so. */ || defer_ok /* If this is a static data member that will be defined elsewhere, we don't want to instantiate the entire data member, but we do want to instantiate the initializer so that we can substitute that elsewhere. */ || (external_p && VAR_P (d)) /* Handle here a deleted function too, avoid generating its body (c++/61080). */ || deleted_p) { /* The definition of the static data member is now required so we must substitute the initializer. */ if (VAR_P (d) && !DECL_INITIAL (d) && DECL_INITIAL (code_pattern)) { tree ns; tree init; bool const_init = false; bool enter_context = DECL_CLASS_SCOPE_P (d); ns = decl_namespace_context (d); push_nested_namespace (ns); if (enter_context) push_nested_class (DECL_CONTEXT (d)); init = tsubst_expr (DECL_INITIAL (code_pattern), args, tf_warning_or_error, NULL_TREE, /*integral_constant_expression_p=*/false); /* If instantiating the initializer involved instantiating this again, don't call cp_finish_decl twice. */ if (!DECL_INITIAL (d)) { /* Make sure the initializer is still constant, in case of circular dependency (template/instantiate6.C). */ const_init = DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (code_pattern); cp_finish_decl (d, init, /*init_const_expr_p=*/const_init, /*asmspec_tree=*/NULL_TREE, LOOKUP_ONLYCONVERTING); } if (enter_context) pop_nested_class (); pop_nested_namespace (ns); } /* We restore the source position here because it's used by add_pending_template. 
*/ input_location = saved_loc; if (at_eof && !pattern_defined && DECL_EXPLICIT_INSTANTIATION (d) && DECL_NOT_REALLY_EXTERN (d)) /* [temp.explicit] The definition of a non-exported function template, a non-exported member function template, or a non-exported member function or static data member of a class template shall be present in every translation unit in which it is explicitly instantiated. */ permerror (input_location, "explicit instantiation of %qD " "but no definition available", d); /* If we're in unevaluated context, we just wanted to get the constant value; this isn't an odr use, so don't queue a full instantiation. */ if (!cp_unevaluated_operand /* ??? Historically, we have instantiated inline functions, even when marked as "extern template". */ && !(external_p && VAR_P (d))) add_pending_template (d); } else { if (variable_template_p (gen_tmpl)) note_variable_template_instantiation (d); instantiate_body (td, args, d, false); } pop_deferring_access_checks (); timevar_pop (TV_TEMPLATE_INST); pop_tinst_level (); input_location = saved_loc; cp_unevaluated_operand = saved_unevaluated_operand; c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings; return d; } /* Run through the list of templates that we wish we could instantiate, and instantiate any we can. RETRIES is the number of times we retry pending template instantiation. */ void instantiate_pending_templates (int retries) { int reconsider; location_t saved_loc = input_location; /* Instantiating templates may trigger vtable generation. This in turn may require further template instantiations. We place a limit here to avoid infinite loop. 
*/
  if (pending_templates && retries >= max_tinst_depth)
    {
      tree decl = pending_templates->tinst->maybe_get_node ();

      fatal_error (input_location,
		   "template instantiation depth exceeds maximum of %d"
		   " instantiating %q+D, possibly from virtual table generation"
		   " (use %<-ftemplate-depth=%> to increase the maximum)",
		   max_tinst_depth, decl);
      if (TREE_CODE (decl) == FUNCTION_DECL)
	/* Pretend that we defined it.  */
	DECL_INITIAL (decl) = error_mark_node;
      return;
    }

  /* Repeatedly sweep the pending list: instantiating one entry may make
     another one instantiable, so iterate until a full pass makes no
     progress (reconsider stays 0).  */
  do
    {
      struct pending_template **t = &pending_templates;
      struct pending_template *last = NULL;
      reconsider = 0;
      while (*t)
	{
	  tree instantiation = reopen_tinst_level ((*t)->tinst);
	  bool complete = false;

	  if (TYPE_P (instantiation))
	    {
	      /* A pending class-template instantiation.  */
	      if (!COMPLETE_TYPE_P (instantiation))
		{
		  instantiate_class_template (instantiation);
		  if (CLASSTYPE_TEMPLATE_INSTANTIATION (instantiation))
		    for (tree fld = TYPE_FIELDS (instantiation);
			 fld; fld = TREE_CHAIN (fld))
		      if ((VAR_P (fld)
			   || (TREE_CODE (fld) == FUNCTION_DECL
			       && !DECL_ARTIFICIAL (fld)))
			  && DECL_TEMPLATE_INSTANTIATION (fld))
			instantiate_decl (fld,
					  /*defer_ok=*/false,
					  /*expl_inst_class_mem_p=*/false);

		  if (COMPLETE_TYPE_P (instantiation))
		    reconsider = 1;
		}

	      complete = COMPLETE_TYPE_P (instantiation);
	    }
	  else
	    {
	      /* A pending function or variable instantiation.  */
	      if (!DECL_TEMPLATE_SPECIALIZATION (instantiation)
		  && !DECL_TEMPLATE_INSTANTIATED (instantiation))
		{
		  instantiation
		    = instantiate_decl (instantiation,
					/*defer_ok=*/false,
					/*expl_inst_class_mem_p=*/false);

		  if (DECL_TEMPLATE_INSTANTIATED (instantiation))
		    reconsider = 1;
		}

	      complete = (DECL_TEMPLATE_SPECIALIZATION (instantiation)
			  || DECL_TEMPLATE_INSTANTIATED (instantiation));
	    }

	  if (complete)
	    {
	      /* If INSTANTIATION has been instantiated, then we don't
		 need to consider it again in the future.  Unlink it and
		 return the node to the freelist.  */
	      struct pending_template *drop = *t;
	      *t = (*t)->next;
	      set_refcount_ptr (drop->tinst);
	      pending_template_freelist ().free (drop);
	    }
	  else
	    {
	      last = *t;
	      t = &(*t)->next;
	    }
	  /* Reset per-item instantiation state before the next entry.  */
	  tinst_depth = 0;
	  set_refcount_ptr (current_tinst_level);
	}
      last_pending_template = last;
    }
  while (reconsider);

  input_location = saved_loc;
}

/* Substitute ARGVEC into T, which is a list of initializers for
   either base class or a non-static data member.  The TREE_PURPOSEs
   are DECLs, and the TREE_VALUEs are the initializer values.  Used by
   instantiate_decl.  */

static tree
tsubst_initializer_list (tree t, tree argvec)
{
  tree inits = NULL_TREE;
  /* error_mark_node means "no delegating constructor seen yet".  */
  tree target_ctor = error_mark_node;

  for (; t; t = TREE_CHAIN (t))
    {
      tree decl;
      tree init;
      tree expanded_bases = NULL_TREE;
      tree expanded_arguments = NULL_TREE;
      int i, len = 1;

      if (TREE_CODE (TREE_PURPOSE (t)) == TYPE_PACK_EXPANSION)
	{
	  tree expr;
	  tree arg;

	  /* Expand the base class expansion type into separate base
	     classes.  */
	  expanded_bases = tsubst_pack_expansion (TREE_PURPOSE (t), argvec,
						  tf_warning_or_error,
						  NULL_TREE);
	  if (expanded_bases == error_mark_node)
	    continue;

	  /* We'll be building separate TREE_LISTs of arguments for
	     each base.  */
	  len = TREE_VEC_LENGTH (expanded_bases);
	  expanded_arguments = make_tree_vec (len);
	  for (i = 0; i < len; i++)
	    TREE_VEC_ELT (expanded_arguments, i) = NULL_TREE;

	  /* Build a dummy EXPR_PACK_EXPANSION that will be used to
	     expand each argument in the TREE_VALUE of t.  */
	  expr = make_node (EXPR_PACK_EXPANSION);
	  PACK_EXPANSION_LOCAL_P (expr) = true;
	  PACK_EXPANSION_PARAMETER_PACKS (expr) =
	    PACK_EXPANSION_PARAMETER_PACKS (TREE_PURPOSE (t));

	  if (TREE_VALUE (t) == void_type_node)
	    /* VOID_TYPE_NODE is used to indicate
	       value-initialization.  */
	    {
	      for (i = 0; i < len; i++)
		TREE_VEC_ELT (expanded_arguments, i) = void_type_node;
	    }
	  else
	    {
	      /* Substitute parameter packs into each argument in the
		 TREE_LIST.  */
	      in_base_initializer = 1;
	      for (arg = TREE_VALUE (t); arg; arg = TREE_CHAIN (arg))
		{
		  tree expanded_exprs;

		  /* Expand the argument.
*/
		  SET_PACK_EXPANSION_PATTERN (expr, TREE_VALUE (arg));
		  expanded_exprs
		    = tsubst_pack_expansion (expr, argvec,
					     tf_warning_or_error,
					     NULL_TREE);
		  if (expanded_exprs == error_mark_node)
		    continue;

		  /* Prepend each of the expanded expressions to the
		     corresponding TREE_LIST in EXPANDED_ARGUMENTS.  */
		  for (i = 0; i < len; i++)
		    {
		      TREE_VEC_ELT (expanded_arguments, i)
			= tree_cons (NULL_TREE,
				     TREE_VEC_ELT (expanded_exprs, i),
				     TREE_VEC_ELT (expanded_arguments, i));
		    }
		}
	      in_base_initializer = 0;

	      /* Reverse all of the TREE_LISTs in EXPANDED_ARGUMENTS,
		 since we built them backwards.  */
	      for (i = 0; i < len; i++)
		{
		  TREE_VEC_ELT (expanded_arguments, i)
		    = nreverse (TREE_VEC_ELT (expanded_arguments, i));
		}
	    }
	}

      /* len is 1 for an ordinary mem-initializer, or the number of
	 expanded bases for a pack expansion.  */
      for (i = 0; i < len; ++i)
	{
	  if (expanded_bases)
	    {
	      decl = TREE_VEC_ELT (expanded_bases, i);
	      decl = expand_member_init (decl);
	      init = TREE_VEC_ELT (expanded_arguments, i);
	    }
	  else
	    {
	      tree tmp;
	      decl = tsubst_copy (TREE_PURPOSE (t), argvec,
				  tf_warning_or_error, NULL_TREE);

	      decl = expand_member_init (decl);
	      if (decl && !DECL_P (decl))
		in_base_initializer = 1;

	      init = TREE_VALUE (t);
	      tmp = init;
	      if (init != void_type_node)
		init = tsubst_expr (init, argvec,
				    tf_warning_or_error, NULL_TREE,
				    /*integral_constant_expression_p=*/false);
	      if (init == NULL_TREE && tmp != NULL_TREE)
		/* If we had an initializer but it instantiated to nothing,
		   value-initialize the object.  This will only occur when
		   the initializer was a pack expansion where the parameter
		   packs used in that expansion were of length zero.  */
		init = void_type_node;
	      in_base_initializer = 0;
	    }

	  /* A delegating constructor may not mix a target constructor
	     with other mem-initializers.  */
	  if (target_ctor != error_mark_node
	      && init != error_mark_node)
	    {
	      error ("mem-initializer for %qD follows constructor delegation",
		     decl);
	      return inits;
	    }
	  /* Look for a target constructor. */
	  if (init != error_mark_node
	      && decl && CLASS_TYPE_P (decl)
	      && same_type_p (decl, current_class_type))
	    {
	      maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS);
	      if (inits)
		{
		  error ("constructor delegation follows mem-initializer for %qD",
			 TREE_PURPOSE (inits));
		  continue;
		}
	      target_ctor = init;
	    }

	  if (decl)
	    {
	      init = build_tree_list (decl, init);
	      /* Carry over the dummy TREE_TYPE node containing the source
		 location.  */
	      TREE_TYPE (init) = TREE_TYPE (t);
	      TREE_CHAIN (init) = inits;
	      inits = init;
	    }
	}
    }
  return inits;
}

/* Set CURRENT_ACCESS_SPECIFIER based on the protection of DECL.  */

static void
set_current_access_from_decl (tree decl)
{
  if (TREE_PRIVATE (decl))
    current_access_specifier = access_private_node;
  else if (TREE_PROTECTED (decl))
    current_access_specifier = access_protected_node;
  else
    current_access_specifier = access_public_node;
}

/* Instantiate an enumerated type.  TAG is the template type, NEWTAG is
   the instantiation (which should have been created with
   start_enum) and ARGS are the template arguments to use.  */

static void
tsubst_enum (tree tag, tree newtag, tree args)
{
  tree e;

  if (SCOPED_ENUM_P (newtag))
    begin_scope (sk_scoped_enum, newtag);

  for (e = TYPE_VALUES (tag); e; e = TREE_CHAIN (e))
    {
      tree value;
      tree decl;

      decl = TREE_VALUE (e);
      /* Note that in a template enum, the TREE_VALUE is the
	 CONST_DECL, not the corresponding INTEGER_CST.  */
      value = tsubst_expr (DECL_INITIAL (decl),
			   args, tf_warning_or_error, NULL_TREE,
			   /*integral_constant_expression_p=*/true);

      /* Give this enumeration constant the correct access.  */
      set_current_access_from_decl (decl);

      /* Actually build the enumerator itself.  Here we're assuming that
	 enumerators can't have dependent attributes.
*/
      build_enumerator (DECL_NAME (decl), value, newtag,
			DECL_ATTRIBUTES (decl), DECL_SOURCE_LOCATION (decl));
    }

  if (SCOPED_ENUM_P (newtag))
    finish_scope ();

  finish_enum_value_list (newtag);
  finish_enum (newtag);

  /* Point the instantiated enum at the pattern's source location.  */
  DECL_SOURCE_LOCATION (TYPE_NAME (newtag))
    = DECL_SOURCE_LOCATION (TYPE_NAME (tag));
}

/* DECL is a FUNCTION_DECL that is a template specialization.  Return
   its type -- but without substituting the innermost set of template
   arguments.  So, innermost set of template parameters will appear in
   the type.  */

tree
get_mostly_instantiated_function_type (tree decl)
{
  /* For a function, DECL_TI_TEMPLATE is partially instantiated.  */
  return TREE_TYPE (DECL_TI_TEMPLATE (decl));
}

/* Return truthvalue if we're processing a template different from
   the last one involved in diagnostics.  */
bool
problematic_instantiation_changed (void)
{
  return current_tinst_level != last_error_tinst_level;
}

/* Remember current template involved in diagnostics.  */
void
record_last_problematic_instantiation (void)
{
  set_refcount_ptr (last_error_tinst_level, current_tinst_level);
}

/* Return the current (innermost) template-instantiation level.  */
struct tinst_level *
current_instantiation (void)
{
  return current_tinst_level;
}

/* Return TRUE if current_function_decl is being instantiated, false
   otherwise.  */

bool
instantiating_current_function_p (void)
{
  return (current_instantiation ()
	  && (current_instantiation ()->maybe_get_node ()
	      == current_function_decl));
}

/* [temp.param] Check that template non-type parm TYPE is of an allowable
   type.  Return false for ok, true for disallowed.  Issue error and
   inform messages under control of COMPLAIN.
*/
static bool
invalid_nontype_parm_type_p (tree type, tsubst_flags_t complain)
{
  /* Types that have always been acceptable: integral/enum, pointers,
     lvalue references, pointers to members.  */
  if (INTEGRAL_OR_ENUMERATION_TYPE_P (type))
    return false;
  else if (TYPE_PTR_P (type))
    return false;
  else if (TYPE_REF_P (type)
	   && !TYPE_REF_IS_RVALUE (type))
    return false;
  else if (TYPE_PTRMEM_P (type))
    return false;
  else if (TREE_CODE (type) == TEMPLATE_TYPE_PARM)
    {
      if (CLASS_PLACEHOLDER_TEMPLATE (type) && cxx_dialect < cxx20)
	{
	  if (complain & tf_error)
	    error ("non-type template parameters of deduced class type only "
		   "available with %<-std=c++20%> or %<-std=gnu++20%>");
	  return true;
	}
      return false;
    }
  else if (TREE_CODE (type) == TYPENAME_TYPE)
    return false;
  else if (TREE_CODE (type) == DECLTYPE_TYPE)
    return false;
  else if (TREE_CODE (type) == NULLPTR_TYPE)
    return false;
  /* A bound template template parm could later be instantiated to have a valid
     nontype parm type via an alias template.  */
  else if (cxx_dialect >= cxx11
	   && TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    return false;
  else if (VOID_TYPE_P (type))
    /* Fall through.  */;
  else if (cxx_dialect >= cxx20)
    {
      /* C++20 additionally allows structural class types.  */
      if (dependent_type_p (type))
	return false;
      if (!complete_type_or_maybe_complain (type, NULL_TREE, complain))
	return true;
      if (structural_type_p (type))
	return false;
      if (complain & tf_error)
	{
	  auto_diagnostic_group d;
	  error ("%qT is not a valid type for a template non-type "
		 "parameter because it is not structural", type);
	  /* The 'explain' overload emits notes saying why not.  */
	  structural_type_p (type, true);
	}
      return true;
    }
  else if (CLASS_TYPE_P (type))
    {
      if (complain & tf_error)
	error ("non-type template parameters of class type only available "
	       "with %<-std=c++20%> or %<-std=gnu++20%>");
      return true;
    }

  if (complain & tf_error)
    {
      if (type == error_mark_node)
	inform (input_location, "invalid template non-type parameter");
      else
	error ("%q#T is not a valid type for a template non-type parameter",
	       type);
    }
  return true;
}

/* Returns TRUE if TYPE is dependent, in the sense of [temp.dep.type].
Assumes that TYPE really is a type, and not the ERROR_MARK_NODE.*/

/* Worker for dependent_type_p; the result is cached by the caller via
   TYPE_DEPENDENT_P / TYPE_DEPENDENT_P_VALID.  */
static bool
dependent_type_p_r (tree type)
{
  tree scope;

  /* [temp.dep.type]

     A type is dependent if it is:

     -- a template parameter. Template template parameters are types
	for us (since TYPE_P holds true for them) so we handle
	them here.  */
  if (TREE_CODE (type) == TEMPLATE_TYPE_PARM
      || TREE_CODE (type) == TEMPLATE_TEMPLATE_PARM)
    return true;
  /* -- a qualified-id with a nested-name-specifier which contains a
	class-name that names a dependent type or whose unqualified-id
	names a dependent type.  */
  if (TREE_CODE (type) == TYPENAME_TYPE)
    return true;

  /* An alias template specialization can be dependent even if the
     resulting type is not.  */
  if (dependent_alias_template_spec_p (type, nt_transparent))
    return true;

  /* -- a cv-qualified type where the cv-unqualified type is
	dependent.
     No code is necessary for this bullet; the code below handles
     cv-qualified types, and we don't want to strip aliases with
     TYPE_MAIN_VARIANT because of DR 1558.  */
  /* -- a compound type constructed from any dependent type.  */
  if (TYPE_PTRMEM_P (type))
    return (dependent_type_p (TYPE_PTRMEM_CLASS_TYPE (type))
	    || dependent_type_p (TYPE_PTRMEM_POINTED_TO_TYPE
				 (type)));
  else if (INDIRECT_TYPE_P (type))
    return dependent_type_p (TREE_TYPE (type));
  else if (FUNC_OR_METHOD_TYPE_P (type))
    {
      tree arg_type;

      /* Dependent return type or any dependent parameter type makes
	 the whole function type dependent.  */
      if (dependent_type_p (TREE_TYPE (type)))
	return true;
      for (arg_type = TYPE_ARG_TYPES (type);
	   arg_type;
	   arg_type = TREE_CHAIN (arg_type))
	if (dependent_type_p (TREE_VALUE (arg_type)))
	  return true;
      if (cxx_dialect >= cxx17)
	/* A value-dependent noexcept-specifier makes the type dependent.  */
	if (tree spec = TYPE_RAISES_EXCEPTIONS (type))
	  if (tree noex = TREE_PURPOSE (spec))
	    /* Treat DEFERRED_NOEXCEPT as non-dependent, since it doesn't
	       affect overload resolution and treating it as dependent breaks
	       things.  Same for an unparsed noexcept expression.  */
	    if (TREE_CODE (noex) != DEFERRED_NOEXCEPT
		&& TREE_CODE (noex) != DEFERRED_PARSE
		&& value_dependent_expression_p (noex))
	      return true;
      return false;
    }
  /* -- an array type constructed from any dependent type or whose
	size is specified by a constant expression that is
	value-dependent.

	We checked for type- and value-dependence of the bounds in
	compute_array_index_type, so TYPE_DEPENDENT_P is already set.  */
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (TYPE_DOMAIN (type)
	  && dependent_type_p (TYPE_DOMAIN (type)))
	return true;
      return dependent_type_p (TREE_TYPE (type));
    }

  /* -- a template-id in which either the template name is a template
     parameter ...  */
  if (TREE_CODE (type) == BOUND_TEMPLATE_TEMPLATE_PARM)
    return true;
  /* ... or any of the template arguments is a dependent type or
	an expression that is type-dependent or value-dependent.  */
  else if (CLASS_TYPE_P (type) && CLASSTYPE_TEMPLATE_INFO (type)
	   && (any_dependent_template_arguments_p
	       (INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type)))))
    return true;

  /* All TYPEOF_TYPEs, DECLTYPE_TYPEs, and UNDERLYING_TYPEs are
     dependent; if the argument of the `typeof' expression is not
     type-dependent, then it should already been have resolved.  */
  if (TREE_CODE (type) == TYPEOF_TYPE
      || TREE_CODE (type) == DECLTYPE_TYPE
      || TREE_CODE (type) == UNDERLYING_TYPE)
    return true;

  /* A template argument pack is dependent if any of its packed
     arguments are.  */
  if (TREE_CODE (type) == TYPE_ARGUMENT_PACK)
    {
      tree args = ARGUMENT_PACK_ARGS (type);
      int i, len = TREE_VEC_LENGTH (args);
      for (i = 0; i < len; ++i)
	if (dependent_template_arg_p (TREE_VEC_ELT (args, i)))
	  return true;
    }

  /* All TYPE_PACK_EXPANSIONs are dependent, because parameter packs must
     be template parameters.  */
  if (TREE_CODE (type) == TYPE_PACK_EXPANSION)
    return true;

  if (any_dependent_type_attributes_p (TYPE_ATTRIBUTES (type)))
    return true;

  /* The standard does not specifically mention types that are local
     to template functions or local classes, but they should be
     considered dependent too.  For example:

       template <int I> void f() {
	 enum E { a = I };
	 S<sizeof (E)> s;
       }

     The size of `E' cannot be known until the value of `I' has been
     determined.  Therefore, `E' must be considered dependent.  */
  scope = TYPE_CONTEXT (type);
  if (scope && TYPE_P (scope))
    return dependent_type_p (scope);
  /* Don't use type_dependent_expression_p here, as it can lead
     to infinite recursion trying to determine whether a lambda
     nested in a lambda is dependent (c++/47687).  */
  else if (scope && TREE_CODE (scope) == FUNCTION_DECL
	   && DECL_LANG_SPECIFIC (scope)
	   && DECL_TEMPLATE_INFO (scope)
	   && (any_dependent_template_arguments_p
	       (INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (scope)))))
    return true;

  /* Other types are non-dependent.  */
  return false;
}

/* Returns TRUE if TYPE is dependent, in the sense of
   [temp.dep.type].  Note that a NULL type is considered dependent.  */

bool
dependent_type_p (tree type)
{
  /* If there are no template parameters in scope, then there can't be
     any dependent types.  */
  if (!processing_template_decl)
    {
      /* If we are not processing a template, then nobody should be
	 providing us with a dependent type.  */
      gcc_assert (type);
      gcc_assert (TREE_CODE (type) != TEMPLATE_TYPE_PARM || is_auto (type));
      return false;
    }

  /* If the type is NULL, we have not computed a type for the entity
     in question; in that case, the type is dependent.  */
  if (!type)
    return true;

  /* Erroneous types can be considered non-dependent.  */
  if (type == error_mark_node)
    return false;

  /* Getting here with global_type_node means we improperly called this
     function on the TREE_TYPE of an IDENTIFIER_NODE.  */
  gcc_checking_assert (type != global_type_node);

  /* If we have not already computed the appropriate value for TYPE,
     do so now.
*/
  if (!TYPE_DEPENDENT_P_VALID (type))
    {
      /* Compute once and cache the answer on the type node.  */
      TYPE_DEPENDENT_P (type) = dependent_type_p_r (type);
      TYPE_DEPENDENT_P_VALID (type) = 1;
    }

  return TYPE_DEPENDENT_P (type);
}

/* Returns TRUE if SCOPE is a dependent scope, in which we can't do any
   lookup.  In other words, a dependent type that is not the current
   instantiation.  */

bool
dependent_scope_p (tree scope)
{
  return (scope && TYPE_P (scope) && dependent_type_p (scope)
	  && !currently_open_class (scope));
}

/* T is a SCOPE_REF.  Return whether it represents a non-static member of
   an unknown base of 'this' (and is therefore instantiation-dependent).  */

static bool
unknown_base_ref_p (tree t)
{
  if (!current_class_ptr)
    return false;

  tree mem = TREE_OPERAND (t, 1);
  if (shared_member_p (mem))
    return false;

  tree cur = current_nonlambda_class_type ();
  if (!any_dependent_bases_p (cur))
    return false;

  tree ctx = TREE_OPERAND (t, 0);
  if (DERIVED_FROM_P (ctx, cur))
    return false;

  return true;
}

/* T is a SCOPE_REF; return whether we need to consider it
    instantiation-dependent so that we can check access at instantiation
    time even though we know which member it resolves to.  */

static bool
instantiation_dependent_scope_ref_p (tree t)
{
  if (DECL_P (TREE_OPERAND (t, 1))
      && CLASS_TYPE_P (TREE_OPERAND (t, 0))
      && !unknown_base_ref_p (t)
      && accessible_in_template_p (TREE_OPERAND (t, 0),
				   TREE_OPERAND (t, 1)))
    return false;
  else
    return true;
}

/* Returns TRUE if the EXPRESSION is value-dependent, in the sense of
   [temp.dep.constexpr].  EXPRESSION is already known to be a constant
   expression.  */

/* Note that this predicate is not appropriate for general expressions;
   only constant expressions (that satisfy potential_constant_expression)
   can be tested for value dependence.  */

bool
value_dependent_expression_p (tree expression)
{
  if (!processing_template_decl || expression == NULL_TREE)
    return false;

  /* A type-dependent expression is also value-dependent.
*/
  if (type_dependent_expression_p (expression))
    return true;

  /* Dispatch on the tree code; the default case handles generic
     expression classes by recursing over their operands.  */
  switch (TREE_CODE (expression))
    {
    case BASELINK:
      /* A dependent member function of the current instantiation.  */
      return dependent_type_p (BINFO_TYPE (BASELINK_BINFO (expression)));

    case FUNCTION_DECL:
      /* A dependent member function of the current instantiation.  */
      if (DECL_CLASS_SCOPE_P (expression)
	  && dependent_type_p (DECL_CONTEXT (expression)))
	return true;
      break;

    case IDENTIFIER_NODE:
      /* A name that has not been looked up -- must be dependent.  */
      return true;

    case TEMPLATE_PARM_INDEX:
      /* A non-type template parm.  */
      return true;

    case CONST_DECL:
      /* A non-type template parm.  */
      if (DECL_TEMPLATE_PARM_P (expression))
	return true;
      return value_dependent_expression_p (DECL_INITIAL (expression));

    case VAR_DECL:
      /* A constant with literal type and is initialized
	 with an expression that is value-dependent.  */
      if (DECL_DEPENDENT_INIT_P (expression)
	  /* FIXME cp_finish_decl doesn't fold reference initializers.  */
	  || TYPE_REF_P (TREE_TYPE (expression)))
	return true;
      if (DECL_HAS_VALUE_EXPR_P (expression))
	{
	  tree value_expr = DECL_VALUE_EXPR (expression);
	  if (value_dependent_expression_p (value_expr)
	      /* __PRETTY_FUNCTION__ inside a template function is dependent
		 on the name of the function.  */
	      || (DECL_PRETTY_FUNCTION_P (expression)
		  /* It might be used in a template, but not a template
		     function, in which case its DECL_VALUE_EXPR will be
		     "top level".  */
		  && value_expr == error_mark_node))
	    return true;
	}
      return false;

    case DYNAMIC_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case CONST_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
      /* These expressions are value-dependent if the type to which
	 the cast occurs is dependent or the expression being casted
	 is value-dependent.  */
      {
	tree type = TREE_TYPE (expression);

	if (dependent_type_p (type))
	  return true;

	/* A functional cast has a list of operands.  */
	expression = TREE_OPERAND (expression, 0);
	if (!expression)
	  {
	    /* If there are no operands, it must be an expression such
	       as "int()". This should not happen for aggregate types
	       because it would form non-constant expressions.  */
	    gcc_assert (cxx_dialect >= cxx11
			|| INTEGRAL_OR_ENUMERATION_TYPE_P (type));

	    return false;
	  }

	if (TREE_CODE (expression) == TREE_LIST)
	  return any_value_dependent_elements_p (expression);

	return value_dependent_expression_p (expression);
      }

    case SIZEOF_EXPR:
      if (SIZEOF_EXPR_TYPE_P (expression))
	return dependent_type_p (TREE_TYPE (TREE_OPERAND (expression, 0)));
      /* FALLTHRU */
    case ALIGNOF_EXPR:
    case TYPEID_EXPR:
      /* A `sizeof' expression is value-dependent if the operand is
	 type-dependent or is a pack expansion.  */
      expression = TREE_OPERAND (expression, 0);
      if (PACK_EXPANSION_P (expression))
	return true;
      else if (TYPE_P (expression))
	return dependent_type_p (expression);
      return instantiation_dependent_uneval_expression_p (expression);

    case AT_ENCODE_EXPR:
      /* An 'encode' expression is value-dependent if the operand is
	 type-dependent.  */
      expression = TREE_OPERAND (expression, 0);
      return dependent_type_p (expression);

    case NOEXCEPT_EXPR:
      expression = TREE_OPERAND (expression, 0);
      return instantiation_dependent_uneval_expression_p (expression);

    case SCOPE_REF:
      /* All instantiation-dependent expressions should also be considered
	 value-dependent.  */
      return instantiation_dependent_scope_ref_p (expression);

    case COMPONENT_REF:
      return (value_dependent_expression_p (TREE_OPERAND (expression, 0))
	      || value_dependent_expression_p (TREE_OPERAND (expression, 1)));

    case NONTYPE_ARGUMENT_PACK:
      /* A NONTYPE_ARGUMENT_PACK is value-dependent if any packed argument
	 is value-dependent.  */
      {
	tree values = ARGUMENT_PACK_ARGS (expression);
	int i, len = TREE_VEC_LENGTH (values);

	for (i = 0; i < len; ++i)
	  if (value_dependent_expression_p (TREE_VEC_ELT (values, i)))
	    return true;

	return false;
      }

    case TRAIT_EXPR:
      {
	/* TYPE2 may be a single type or a TREE_LIST of types.  */
	tree type2 = TRAIT_EXPR_TYPE2 (expression);

	if (dependent_type_p (TRAIT_EXPR_TYPE1 (expression)))
	  return true;

	if (!type2)
	  return false;

	if (TREE_CODE (type2) != TREE_LIST)
	  return dependent_type_p (type2);

	for (; type2; type2 = TREE_CHAIN (type2))
	  if (dependent_type_p (TREE_VALUE (type2)))
	    return true;

	return false;
      }

    case MODOP_EXPR:
      return ((value_dependent_expression_p (TREE_OPERAND (expression, 0)))
	      || (value_dependent_expression_p (TREE_OPERAND (expression, 2))));

    case ARRAY_REF:
      return ((value_dependent_expression_p (TREE_OPERAND (expression, 0)))
	      || (value_dependent_expression_p (TREE_OPERAND (expression, 1))));

    case ADDR_EXPR:
      {
	tree op = TREE_OPERAND (expression, 0);
	return (value_dependent_expression_p (op)
		|| has_value_dependent_address (op));
      }

    case REQUIRES_EXPR:
      /* Treat all requires-expressions as value-dependent so
	 we don't try to fold them.  */
      return true;

    case TYPE_REQ:
      return dependent_type_p (TREE_OPERAND (expression, 0));

    case CALL_EXPR:
      {
	if (value_dependent_expression_p (CALL_EXPR_FN (expression)))
	  return true;
	tree fn = get_callee_fndecl (expression);
	int i, nargs;
	nargs = call_expr_nargs (expression);
	for (i = 0; i < nargs; ++i)
	  {
	    tree op = CALL_EXPR_ARG (expression, i);
	    /* In a call to a constexpr member function, look through the
	       implicit ADDR_EXPR on the object argument so that it doesn't
	       cause the call to be considered value-dependent.  We also
	       look through it in potential_constant_expression.  */
	    if (i == 0 && fn && DECL_DECLARED_CONSTEXPR_P (fn)
		&& DECL_NONSTATIC_MEMBER_FUNCTION_P (fn)
		&& TREE_CODE (op) == ADDR_EXPR)
	      op = TREE_OPERAND (op, 0);
	    if (value_dependent_expression_p (op))
	      return true;
	  }
	return false;
      }

    case TEMPLATE_ID_EXPR:
      return concept_definition_p (TREE_OPERAND (expression, 0));

    case CONSTRUCTOR:
      {
	unsigned ix;
	tree val;
	if (dependent_type_p (TREE_TYPE (expression)))
	  return true;
	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), ix, val)
	  if (value_dependent_expression_p (val))
	    return true;
	return false;
      }

    case STMT_EXPR:
      /* Treat a GNU statement expression as dependent to avoid crashing
	 under instantiate_non_dependent_expr; it can't be constant.  */
      return true;

    default:
      /* A constant expression is value-dependent if any subexpression is
	 value-dependent.  */
      switch (TREE_CODE_CLASS (TREE_CODE (expression)))
	{
	case tcc_reference:
	case tcc_unary:
	case tcc_comparison:
	case tcc_binary:
	case tcc_expression:
	case tcc_vl_exp:
	  {
	    int i, len = cp_tree_operand_length (expression);

	    for (i = 0; i < len; i++)
	      {
		tree t = TREE_OPERAND (expression, i);

		/* In some cases, some of the operands may be missing.
		   (For example, in the case of PREDECREMENT_EXPR, the
		   amount to increment by may be missing.)  That doesn't
		   make the expression dependent.  */
		if (t && value_dependent_expression_p (t))
		  return true;
	      }
	  }
	  break;
	default:
	  break;
	}
      break;
    }

  /* The expression is not value-dependent.  */
  return false;
}

/* Returns TRUE if the EXPRESSION is type-dependent, in the sense of
   [temp.dep.expr].  Note that an expression with no type is
   considered dependent.  Other parts of the compiler arrange for an
   expression with type-dependent subexpressions to have no type, so
   this function doesn't have to be fully recursive.
*/

bool
type_dependent_expression_p (tree expression)
{
  if (!processing_template_decl)
    return false;

  /* Null and erroneous expressions are never dependent.  */
  if (expression == NULL_TREE || expression == error_mark_node)
    return false;

  STRIP_ANY_LOCATION_WRAPPER (expression);

  /* An unresolved name is always dependent.  */
  if (identifier_p (expression)
      || TREE_CODE (expression) == USING_DECL
      || TREE_CODE (expression) == WILDCARD_DECL)
    return true;

  /* A lambda-expression in template context is dependent.  dependent_type_p is
     true for a lambda in the scope of a class or function template, but that
     doesn't cover all template contexts, like a default template argument.  */
  if (TREE_CODE (expression) == LAMBDA_EXPR)
    return true;

  /* A fold expression is type-dependent.  */
  if (TREE_CODE (expression) == UNARY_LEFT_FOLD_EXPR
      || TREE_CODE (expression) == UNARY_RIGHT_FOLD_EXPR
      || TREE_CODE (expression) == BINARY_LEFT_FOLD_EXPR
      || TREE_CODE (expression) == BINARY_RIGHT_FOLD_EXPR)
    return true;

  /* Some expression forms are never type-dependent.  */
  if (TREE_CODE (expression) == SIZEOF_EXPR
      || TREE_CODE (expression) == ALIGNOF_EXPR
      || TREE_CODE (expression) == AT_ENCODE_EXPR
      || TREE_CODE (expression) == NOEXCEPT_EXPR
      || TREE_CODE (expression) == TRAIT_EXPR
      || TREE_CODE (expression) == TYPEID_EXPR
      || TREE_CODE (expression) == DELETE_EXPR
      || TREE_CODE (expression) == VEC_DELETE_EXPR
      || TREE_CODE (expression) == THROW_EXPR
      || TREE_CODE (expression) == REQUIRES_EXPR)
    return false;

  /* The types of these expressions depends only on the type to which
     the cast occurs.  */
  if (TREE_CODE (expression) == DYNAMIC_CAST_EXPR
      || TREE_CODE (expression) == STATIC_CAST_EXPR
      || TREE_CODE (expression) == CONST_CAST_EXPR
      || TREE_CODE (expression) == REINTERPRET_CAST_EXPR
      || TREE_CODE (expression) == IMPLICIT_CONV_EXPR
      || TREE_CODE (expression) == CAST_EXPR)
    return dependent_type_p (TREE_TYPE (expression));

  /* The types of these expressions depends only on the type created
     by the expression.  */
  if (TREE_CODE (expression) == NEW_EXPR
      || TREE_CODE (expression) == VEC_NEW_EXPR)
    {
      /* For NEW_EXPR tree nodes created inside a template, either
	 the object type itself or a TREE_LIST may appear as the
	 operand 1.  */
      tree type = TREE_OPERAND (expression, 1);
      if (TREE_CODE (type) == TREE_LIST)
	/* This is an array type.  We need to check array dimensions
	   as well.  */
	return dependent_type_p (TREE_VALUE (TREE_PURPOSE (type)))
	       || value_dependent_expression_p
		    (TREE_OPERAND (TREE_VALUE (type), 1));
      /* Array type whose dimension has to be deduced.  */
      else if (TREE_CODE (type) == ARRAY_TYPE
	       && TREE_OPERAND (expression, 2) == NULL_TREE)
	return true;
      else
	return dependent_type_p (type);
    }

  if (TREE_CODE (expression) == SCOPE_REF)
    {
      tree scope = TREE_OPERAND (expression, 0);
      tree name = TREE_OPERAND (expression, 1);

      /* 14.6.2.2 [temp.dep.expr]: An id-expression is type-dependent if it
	 contains an identifier associated by name lookup with one or more
	 declarations declared with a dependent type, or...a
	 nested-name-specifier or qualified-id that names a member of an
	 unknown specialization.  */
      return (type_dependent_expression_p (name)
	      || dependent_scope_p (scope));
    }

  if (TREE_CODE (expression) == TEMPLATE_DECL
      && !DECL_TEMPLATE_TEMPLATE_PARM_P (expression))
    return uses_outer_template_parms (expression);

  if (TREE_CODE (expression) == STMT_EXPR)
    expression = stmt_expr_value_expr (expression);

  /* A braced init-list is dependent if any of its elements is.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (expression))
    {
      tree elt;
      unsigned i;

      FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expression), i, elt)
	{
	  if (type_dependent_expression_p (elt))
	    return true;
	}
      return false;
    }

  /* A static data member of the current instantiation with incomplete
     array type is type-dependent, as the definition and specializations
     can have different bounds.  */
  if (VAR_P (expression)
      && DECL_CLASS_SCOPE_P (expression)
      && dependent_type_p (DECL_CONTEXT (expression))
      && VAR_HAD_UNKNOWN_BOUND (expression))
    return true;

  /* An array of unknown bound depending on a variadic parameter, eg:

     template<typename... Args>
       void foo (Args... args)
       {
         int arr[] = { args... };
       }

     template<int... vals>
       void bar ()
       {
         int arr[] = { vals... };
       }

     If the array has no length and has an initializer, it must be that
     we couldn't determine its length in cp_complete_array_type because
     it is dependent.  */
  if (VAR_P (expression)
      && TREE_TYPE (expression) != NULL_TREE
      && TREE_CODE (TREE_TYPE (expression)) == ARRAY_TYPE
      && !TYPE_DOMAIN (TREE_TYPE (expression))
      && DECL_INITIAL (expression))
    return true;

  /* A function or variable template-id is type-dependent if it has any
     dependent template arguments.  */
  if (VAR_OR_FUNCTION_DECL_P (expression)
      && DECL_LANG_SPECIFIC (expression)
      && DECL_TEMPLATE_INFO (expression))
    {
      /* Consider the innermost template arguments, since those are the ones
	 that come from the template-id; the template arguments for the
	 enclosing class do not make it type-dependent unless they are used in
	 the type of the decl.  */
      if (instantiates_primary_template_p (expression)
	  && (any_dependent_template_arguments_p
	      (INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (expression)))))
	return true;
    }

  /* Otherwise, if the function decl isn't from a dependent scope, it can't be
     type-dependent.  Checking this is important for functions with auto return
     type, which looks like a dependent type.  */
  if (TREE_CODE (expression) == FUNCTION_DECL
      && !(DECL_CLASS_SCOPE_P (expression)
	   && dependent_type_p (DECL_CONTEXT (expression)))
      && !(DECL_LANG_SPECIFIC (expression)
	   && DECL_UNIQUE_FRIEND_P (expression)
	   && (!DECL_FRIEND_CONTEXT (expression)
	       || dependent_type_p (DECL_FRIEND_CONTEXT (expression))))
      && !DECL_LOCAL_DECL_P (expression))
    {
      gcc_assert (!dependent_type_p (TREE_TYPE (expression))
		  || undeduced_auto_decl (expression));
      return false;
    }

  /* Always dependent, on the number of arguments if nothing else.  */
  if (TREE_CODE (expression) == EXPR_PACK_EXPANSION)
    return true;

  /* An expression with unknown type (e.g. an overload set) requires
     looking through its components.  */
  if (TREE_TYPE (expression) == unknown_type_node)
    {
      if (TREE_CODE (expression) == ADDR_EXPR)
	return type_dependent_expression_p (TREE_OPERAND (expression, 0));
      if (TREE_CODE (expression) == COMPONENT_REF
	  || TREE_CODE (expression) == OFFSET_REF)
	{
	  if (type_dependent_expression_p (TREE_OPERAND (expression, 0)))
	    return true;
	  expression = TREE_OPERAND (expression, 1);
	  if (identifier_p (expression))
	    return false;
	}
      /* SCOPE_REF with non-null TREE_TYPE is always non-dependent.  */
      if (TREE_CODE (expression) == SCOPE_REF)
	return false;

      /* CO_AWAIT/YIELD_EXPR with unknown type is always dependent.  */
      if (TREE_CODE (expression) == CO_AWAIT_EXPR
	  || TREE_CODE (expression) == CO_YIELD_EXPR)
	return true;

      if (BASELINK_P (expression))
	{
	  if (BASELINK_OPTYPE (expression)
	      && dependent_type_p (BASELINK_OPTYPE (expression)))
	    return true;
	  expression = BASELINK_FUNCTIONS (expression);
	}

      if (TREE_CODE (expression) == TEMPLATE_ID_EXPR)
	{
	  if (any_dependent_template_arguments_p
	      (TREE_OPERAND (expression, 1)))
	    return true;
	  expression = TREE_OPERAND (expression, 0);
	  if (identifier_p (expression))
	    return true;
	}

      gcc_assert (OVL_P (expression));

      /* An overload set is dependent if any of its members is.  */
      for (lkp_iterator iter (expression); iter; ++iter)
	if (type_dependent_expression_p (*iter))
	  return true;

      return false;
    }

  /* The type of a non-type template parm declared with a placeholder type
     depends on the corresponding template argument, even though
     placeholders are not normally considered dependent.  */
  if (TREE_CODE (expression) == TEMPLATE_PARM_INDEX
      && is_auto (TREE_TYPE (expression)))
    return true;

  gcc_assert (TREE_CODE (expression) != TYPE_DECL);

  /* Dependent type attributes might not have made it from the decl to
     the type yet.  */
  if (DECL_P (expression)
      && any_dependent_type_attributes_p (DECL_ATTRIBUTES (expression)))
    return true;

  return (dependent_type_p (TREE_TYPE (expression)));
}

/* [temp.dep.expr]/5: A class member access expression (5.2.5) is
   type-dependent if the expression refers to a member of the current
   instantiation and the type of the referenced member is dependent, or the
   class member access expression refers to a member of an unknown
   specialization.

   This function returns true if the OBJECT in such a class member access
   expression is of an unknown specialization.  */

bool
type_dependent_object_expression_p (tree object)
{
  /* An IDENTIFIER_NODE can sometimes have a TREE_TYPE, but it's still
     dependent.  */
  if (TREE_CODE (object) == IDENTIFIER_NODE)
    return true;
  tree scope = TREE_TYPE (object);
  return (!scope || dependent_scope_p (scope));
}

/* walk_tree callback function for instantiation_dependent_expression_p,
   below.
Returns non-zero if a dependent subexpression is found.  */

static tree
instantiation_dependent_r (tree *tp, int *walk_subtrees,
			   void * /*data*/)
{
  if (TYPE_P (*tp))
    {
      /* We don't have to worry about decltype currently because decltype
	 of an instantiation-dependent expr is a dependent type.  This might
	 change depending on the resolution of DR 1172.  */
      *walk_subtrees = false;
      return NULL_TREE;
    }
  enum tree_code code = TREE_CODE (*tp);
  switch (code)
    {
      /* Don't treat an argument list as dependent just because it has no
	 TREE_TYPE.  */
    case TREE_LIST:
    case TREE_VEC:
    case NONTYPE_ARGUMENT_PACK:
      return NULL_TREE;

    case TEMPLATE_PARM_INDEX:
      if (dependent_type_p (TREE_TYPE (*tp)))
	return *tp;
      if (TEMPLATE_PARM_PARAMETER_PACK (*tp))
	return *tp;
      /* We'll check value-dependence separately.  */
      return NULL_TREE;

      /* Handle expressions with type operands.  */
    case SIZEOF_EXPR:
    case ALIGNOF_EXPR:
    case TYPEID_EXPR:
    case AT_ENCODE_EXPR:
      {
	tree op = TREE_OPERAND (*tp, 0);
	if (code == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (*tp))
	  op = TREE_TYPE (op);
	if (TYPE_P (op))
	  {
	    if (dependent_type_p (op))
	      return *tp;
	    else
	      {
		*walk_subtrees = false;
		return NULL_TREE;
	      }
	  }
	break;
      }

    case COMPONENT_REF:
      if (identifier_p (TREE_OPERAND (*tp, 1)))
	/* In a template, finish_class_member_access_expr creates a
	   COMPONENT_REF with an IDENTIFIER_NODE for op1 even if it isn't
	   type-dependent, so that we can check access control at
	   instantiation time (PR 42277).  See also Core issue 1273.  */
	return *tp;
      break;

    case SCOPE_REF:
      if (instantiation_dependent_scope_ref_p (*tp))
	return *tp;
      else
	break;

      /* Treat statement-expressions as dependent.  */
    case BIND_EXPR:
      return *tp;

      /* Treat requires-expressions as dependent.  */
    case REQUIRES_EXPR:
      return *tp;

    case CALL_EXPR:
      /* Treat concept checks as dependent.  */
      if (concept_check_p (*tp))
	return *tp;
      break;

    case TEMPLATE_ID_EXPR:
      /* Treat concept checks as dependent.  */
      if (concept_check_p (*tp))
	return *tp;
      break;

    case CONSTRUCTOR:
      if (CONSTRUCTOR_IS_DEPENDENT (*tp))
	return *tp;
      break;

    default:
      break;
    }

  if (type_dependent_expression_p (*tp))
    return *tp;
  else
    return NULL_TREE;
}

/* Returns TRUE if the EXPRESSION is instantiation-dependent, in the
   sense defined by the ABI:

   "An expression is instantiation-dependent if it is type-dependent
   or value-dependent, or it has a subexpression that is type-dependent
   or value-dependent."

   Except don't actually check value-dependence for unevaluated expressions,
   because in sizeof(i) we don't care about the value of i.  Checking
   type-dependence will in turn check value-dependence of array bounds/template
   arguments as needed.  */

bool
instantiation_dependent_uneval_expression_p (tree expression)
{
  tree result;

  if (!processing_template_decl)
    return false;

  if (expression == error_mark_node)
    return false;

  result = cp_walk_tree_without_duplicates (&expression,
					    instantiation_dependent_r, NULL);
  return result != NULL_TREE;
}

/* As above, but also check value-dependence of the expression as a whole.  */

bool
instantiation_dependent_expression_p (tree expression)
{
  return (instantiation_dependent_uneval_expression_p (expression)
	  || value_dependent_expression_p (expression));
}

/* Like type_dependent_expression_p, but it also works while not processing
   a template definition, i.e. during substitution or mangling.  */

bool
type_dependent_expression_p_push (tree expr)
{
  bool b;
  /* Temporarily pretend we are inside a template so the dependency
     predicates do their full analysis.  */
  ++processing_template_decl;
  b = type_dependent_expression_p (expr);
  --processing_template_decl;
  return b;
}

/* Returns TRUE if ARGS contains a type-dependent expression.  */

bool
any_type_dependent_arguments_p (const vec<tree, va_gc> *args)
{
  unsigned int i;
  tree arg;

  FOR_EACH_VEC_SAFE_ELT (args, i, arg)
    {
      if (type_dependent_expression_p (arg))
	return true;
    }
  return false;
}

/* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are
   expressions) contains any type-dependent expressions.
*/

bool
any_type_dependent_elements_p (const_tree list)
{
  for (; list; list = TREE_CHAIN (list))
    if (type_dependent_expression_p (TREE_VALUE (list)))
      return true;

  return false;
}

/* Returns TRUE if LIST (a TREE_LIST whose TREE_VALUEs are
   expressions) contains any value-dependent expressions.  */

bool
any_value_dependent_elements_p (const_tree list)
{
  for (; list; list = TREE_CHAIN (list))
    if (value_dependent_expression_p (TREE_VALUE (list)))
      return true;

  return false;
}

/* Returns TRUE if the ARG (a template argument) is dependent.  */

bool
dependent_template_arg_p (tree arg)
{
  if (!processing_template_decl)
    return false;

  /* Assume a template argument that was wrongly written by the user
     is dependent.  This is consistent with what
     any_dependent_template_arguments_p [that calls this function]
     does.  */
  if (!arg || arg == error_mark_node)
    return true;

  if (TREE_CODE (arg) == ARGUMENT_PACK_SELECT)
    arg = argument_pack_select_arg (arg);

  if (TREE_CODE (arg) == TEMPLATE_TEMPLATE_PARM)
    return true;
  if (TREE_CODE (arg) == TEMPLATE_DECL)
    {
      if (DECL_TEMPLATE_PARM_P (arg))
	return true;
      /* A member template of a dependent class is not necessarily
	 type-dependent, but it is a dependent template argument because it
	 will be a member of an unknown specialization to that template.  */
      tree scope = CP_DECL_CONTEXT (arg);
      return TYPE_P (scope) && dependent_type_p (scope);
    }
  else if (ARGUMENT_PACK_P (arg))
    {
      /* A pack argument is dependent if any of its elements is.  */
      tree args = ARGUMENT_PACK_ARGS (arg);
      int i, len = TREE_VEC_LENGTH (args);
      for (i = 0; i < len; ++i)
	{
	  if (dependent_template_arg_p (TREE_VEC_ELT (args, i)))
	    return true;
	}

      return false;
    }
  else if (TYPE_P (arg))
    return dependent_type_p (arg);
  else
    return value_dependent_expression_p (arg);
}

/* Returns true if ARGS (a collection of template arguments) contains
   any types that require structural equality testing.
*/

bool
any_template_arguments_need_structural_equality_p (tree args)
{
  int i;
  int j;

  if (!args)
    return false;
  if (args == error_mark_node)
    return true;

  /* Walk every argument at every template level, looking inside
     argument packs as well.  */
  for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i)
    {
      tree level = TMPL_ARGS_LEVEL (args, i + 1);
      for (j = 0; j < TREE_VEC_LENGTH (level); ++j)
	{
	  tree arg = TREE_VEC_ELT (level, j);
	  tree packed_args = NULL_TREE;
	  int k, len = 1;

	  if (ARGUMENT_PACK_P (arg))
	    {
	      /* Look inside the argument pack.  */
	      packed_args = ARGUMENT_PACK_ARGS (arg);
	      len = TREE_VEC_LENGTH (packed_args);
	    }

	  for (k = 0; k < len; ++k)
	    {
	      if (packed_args)
		arg = TREE_VEC_ELT (packed_args, k);

	      if (error_operand_p (arg))
		return true;
	      else if (TREE_CODE (arg) == TEMPLATE_DECL)
		continue;
	      else if (TYPE_P (arg) && TYPE_STRUCTURAL_EQUALITY_P (arg))
		return true;
	      else if (!TYPE_P (arg) && TREE_TYPE (arg)
		       && TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (arg)))
		return true;
	    }
	}
    }

  return false;
}

/* Returns true if ARGS (a collection of template arguments) contains
   any dependent arguments.  */

bool
any_dependent_template_arguments_p (const_tree args)
{
  int i;
  int j;

  if (!args)
    return false;
  if (args == error_mark_node)
    return true;

  for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i)
    {
      const_tree level = TMPL_ARGS_LEVEL (args, i + 1);
      for (j = 0; j < TREE_VEC_LENGTH (level); ++j)
	if (dependent_template_arg_p (TREE_VEC_ELT (level, j)))
	  return true;
    }

  return false;
}

/* Returns true if ARGS contains any errors.  */

bool
any_erroneous_template_args_p (const_tree args)
{
  int i;
  int j;

  if (args == error_mark_node)
    return true;

  /* ARGS may also be a decl or type carrying template info; fetch its
     argument vector in that case.  */
  if (args && TREE_CODE (args) != TREE_VEC)
    {
      if (tree ti = get_template_info (args))
	args = TI_ARGS (ti);
      else
	args = NULL_TREE;
    }

  if (!args)
    return false;

  for (i = 0; i < TMPL_ARGS_DEPTH (args); ++i)
    {
      const_tree level = TMPL_ARGS_LEVEL (args, i + 1);
      for (j = 0; j < TREE_VEC_LENGTH (level); ++j)
	if (error_operand_p (TREE_VEC_ELT (level, j)))
	  return true;
    }

  return false;
}

/* Returns TRUE if the template TMPL is type-dependent.
*/

bool
dependent_template_p (tree tmpl)
{
  if (TREE_CODE (tmpl) == OVERLOAD)
    {
      /* An overload set is dependent if any member is.  */
      for (lkp_iterator iter (tmpl); iter; ++iter)
	if (dependent_template_p (*iter))
	  return true;
      return false;
    }

  /* Template template parameters are dependent.  */
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl)
      || TREE_CODE (tmpl) == TEMPLATE_TEMPLATE_PARM)
    return true;
  /* So are names that have not been looked up.  */
  if (TREE_CODE (tmpl) == SCOPE_REF || identifier_p (tmpl))
    return true;
  return false;
}

/* Returns TRUE if the specialization TMPL<ARGS> is dependent.  */

bool
dependent_template_id_p (tree tmpl, tree args)
{
  return (dependent_template_p (tmpl)
	  || any_dependent_template_arguments_p (args));
}

/* Returns TRUE if OMP_FOR with DECLV, INITV, CONDV and INCRV vectors
   are dependent.  */

bool
dependent_omp_for_p (tree declv, tree initv, tree condv, tree incrv)
{
  int i;

  if (!processing_template_decl)
    return false;

  /* Check each collapsed loop's decl/init/cond/incr for dependence.  */
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      if (type_dependent_expression_p (decl)
	  || TREE_CODE (decl) == SCOPE_REF)
	return true;

      if (init && type_dependent_expression_p (init))
	return true;

      if (cond == global_namespace)
	return true;

      if (type_dependent_expression_p (cond))
	return true;

      if (COMPARISON_CLASS_P (cond)
	  && (type_dependent_expression_p (TREE_OPERAND (cond, 0))
	      || type_dependent_expression_p (TREE_OPERAND (cond, 1))))
	return true;

      if (TREE_CODE (incr) == MODOP_EXPR)
	{
	  if (type_dependent_expression_p (TREE_OPERAND (incr, 0))
	      || type_dependent_expression_p (TREE_OPERAND (incr, 2)))
	    return true;
	}
      else if (type_dependent_expression_p (incr))
	return true;
      else if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  if (type_dependent_expression_p (TREE_OPERAND (incr, 0)))
	    return true;
	  else if (BINARY_CLASS_P (TREE_OPERAND (incr, 1)))
	    {
	      tree t = TREE_OPERAND (incr, 1);
	      if (type_dependent_expression_p (TREE_OPERAND (t, 0))
		  || type_dependent_expression_p (TREE_OPERAND (t, 1)))
		return true;

	      /* If this loop has a class iterator with != comparison
		 with increment other than i++/++i/i--/--i, make sure the
		 increment is constant.  */
	      if (CLASS_TYPE_P (TREE_TYPE (decl))
		  && TREE_CODE (cond) == NE_EXPR)
		{
		  if (TREE_OPERAND (t, 0) == decl)
		    t = TREE_OPERAND (t, 1);
		  else
		    t = TREE_OPERAND (t, 0);
		  if (TREE_CODE (t) != INTEGER_CST)
		    return true;
		}
	    }
	}
    }

  return false;
}

/* TYPE is a TYPENAME_TYPE.  Returns the ordinary TYPE to which the
   TYPENAME_TYPE corresponds.  Returns the original TYPENAME_TYPE if
   no such TYPE can be found.  Note that this function peers inside
   uninstantiated templates and therefore should be used only in
   extremely limited situations.  ONLY_CURRENT_P restricts this
   peering to the currently open classes hierarchy (which is required
   when comparing types).  */

tree
resolve_typename_type (tree type, bool only_current_p)
{
  tree scope;
  tree name;
  tree decl;
  int quals;
  tree pushed_scope;
  tree result;

  gcc_assert (TREE_CODE (type) == TYPENAME_TYPE);

  scope = TYPE_CONTEXT (type);
  /* We shouldn't have built a TYPENAME_TYPE with a non-dependent scope.  */
  gcc_checking_assert (uses_template_parms (scope));

  /* Usually the non-qualified identifier of a TYPENAME_TYPE is
     TYPE_IDENTIFIER (type).  But when 'type' is a typedef variant of
     a TYPENAME_TYPE node, then TYPE_NAME (type) is set to the TYPE_DECL
     representing the typedef.  In that case TYPE_IDENTIFIER (type) is
     not the non-qualified identifier of the TYPENAME_TYPE anymore.
     So by getting the TYPE_IDENTIFIER of the _main declaration_ of
     the TYPENAME_TYPE instead, we avoid messing up with a possible
     typedef variant case.  */
  name = TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (type));

  /* If the SCOPE is itself a TYPENAME_TYPE, then we need to resolve
     it first before we can figure out what NAME refers to.  */
  if (TREE_CODE (scope) == TYPENAME_TYPE)
    {
      if (TYPENAME_IS_RESOLVING_P (scope))
	/* Given a class template A with a dependent base with nested type C,
	   typedef typename A::C::C C will land us here, as trying to resolve
	   the initial A::C leads to the local C typedef, which leads back to
	   A::C::C.  So we break the recursion now.  */
	return type;
      else
	scope = resolve_typename_type (scope, only_current_p);
    }
  /* If we don't know what SCOPE refers to, then we cannot resolve the
     TYPENAME_TYPE.  */
  if (!CLASS_TYPE_P (scope))
    return type;
  /* If this is a typedef, we don't want to look inside (c++/11987).  */
  if (typedef_variant_p (type))
    return type;
  /* If SCOPE isn't the template itself, it will not have a valid
     TYPE_FIELDS list.  */
  if (same_type_p (scope, CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope)))
    /* scope is either the template itself or a compatible instantiation
       like X<T>, so look up the name in the original template.  */
    scope = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (scope);
  /* If scope has no fields, it can't be a current instantiation.  Check this
     before currently_open_class to avoid infinite recursion (71515).  */
  if (!TYPE_FIELDS (scope))
    return type;
  /* If the SCOPE is not the current instantiation, there's no reason
     to look inside it.  */
  if (only_current_p && !currently_open_class (scope))
    return type;
  /* Enter the SCOPE so that name lookup will be resolved as if we
     were in the class definition.  In particular, SCOPE will no
     longer be considered a dependent type.  */
  pushed_scope = push_scope (scope);
  /* Look up the declaration.  */
  decl = lookup_member (scope, name, /*protect=*/0, /*want_type=*/true,
			tf_warning_or_error);

  result = NULL_TREE;

  /* For a TYPENAME_TYPE like "typename X::template Y<T>", we want to
     find a TEMPLATE_DECL.  Otherwise, we want to find a TYPE_DECL.  */
  tree fullname = TYPENAME_TYPE_FULLNAME (type);
  if (!decl)
    /*nop*/;
  else if (identifier_p (fullname)
	   && TREE_CODE (decl) == TYPE_DECL)
    {
      result = TREE_TYPE (decl);
      if (result == error_mark_node)
	result = NULL_TREE;
    }
  else if (TREE_CODE (fullname) == TEMPLATE_ID_EXPR
	   && DECL_CLASS_TEMPLATE_P (decl))
    {
      /* Obtain the template and the arguments.  */
      tree tmpl = TREE_OPERAND (fullname, 0);
      if (TREE_CODE (tmpl) == IDENTIFIER_NODE)
	{
	  /* We get here with a plain identifier because a previous tentative
	     parse of the nested-name-specifier as part of a ptr-operator saw
	     ::template X<A>.  The use of ::template is necessary in a
	     ptr-operator, but wrong in a declarator-id.

	     [temp.names]: In a qualified-id of a declarator-id, the keyword
	     template shall not appear at the top level.  */
	  pedwarn (cp_expr_loc_or_input_loc (fullname), OPT_Wpedantic,
		   "keyword %<template%> not allowed in declarator-id");
	  tmpl = decl;
	}
      tree args = TREE_OPERAND (fullname, 1);
      /* Instantiate the template.  */
      result = lookup_template_class (tmpl, args, NULL_TREE, NULL_TREE,
				      /*entering_scope=*/true,
				      tf_error | tf_user);
      if (result == error_mark_node)
	result = NULL_TREE;
    }

  /* Leave the SCOPE.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  /* If we failed to resolve it, return the original typename.  */
  if (!result)
    return type;

  /* If lookup found a typename type, resolve that too.  */
  if (TREE_CODE (result) == TYPENAME_TYPE
      && !TYPENAME_IS_RESOLVING_P (result))
    {
      /* Ill-formed programs can cause infinite recursion here, so we
	 must catch that.  */
      TYPENAME_IS_RESOLVING_P (result) = 1;
      result = resolve_typename_type (result, only_current_p);
      TYPENAME_IS_RESOLVING_P (result) = 0;
    }

  /* Qualify the resulting type.  */
  quals = cp_type_quals (type);
  if (quals)
    result = cp_build_qualified_type (result, cp_type_quals (result) | quals);

  return result;
}

/* EXPR is an expression which is not type-dependent.  Return a proxy for
   EXPR that can be used to compute the types of larger expressions
   containing EXPR.
*/

tree
build_non_dependent_expr (tree expr)
{
  tree orig_expr = expr;
  tree inner_expr;

  /* When checking, try to get a constant value for all non-dependent
     expressions in order to expose bugs in *_dependent_expression_p
     and constexpr.  This can affect code generation, see PR70704, so
     only do this for -fchecking=2.  */
  if (flag_checking > 1
      && cxx_dialect >= cxx11
      /* Don't do this during nsdmi parsing as it can lead to
	 unexpected recursive instantiations.  */
      && !parsing_nsdmi ()
      /* Don't do this during concept processing either and for
         the same reason.  */
      && !processing_constraint_expression_p ())
    fold_non_dependent_expr (expr, tf_none);

  STRIP_ANY_LOCATION_WRAPPER (expr);

  /* Preserve OVERLOADs; the functions must be available to resolve
     types.  */
  inner_expr = expr;
  if (TREE_CODE (inner_expr) == STMT_EXPR)
    inner_expr = stmt_expr_value_expr (inner_expr);
  if (TREE_CODE (inner_expr) == ADDR_EXPR)
    inner_expr = TREE_OPERAND (inner_expr, 0);
  if (TREE_CODE (inner_expr) == COMPONENT_REF)
    inner_expr = TREE_OPERAND (inner_expr, 1);
  if (is_overloaded_fn (inner_expr)
      || TREE_CODE (inner_expr) == OFFSET_REF)
    return orig_expr;
  /* There is no need to return a proxy for a variable or enumerator.  */
  if (VAR_P (expr) || TREE_CODE (expr) == CONST_DECL)
    return orig_expr;
  /* Preserve string constants; conversions from string constants to
     "char *" are allowed, even though normally a "const char *"
     cannot be used to initialize a "char *".  */
  if (TREE_CODE (expr) == STRING_CST)
    return orig_expr;
  /* Preserve void and arithmetic constants, as an optimization -- there is no
     reason to create a new node.  */
  if (TREE_CODE (expr) == VOID_CST
      || TREE_CODE (expr) == INTEGER_CST
      || TREE_CODE (expr) == REAL_CST)
    return orig_expr;
  /* Preserve THROW_EXPRs -- all throw-expressions have type "void".
     There is at least one place where we want to know that a
     particular expression is a throw-expression: when checking a ?:
     expression, there are special rules if the second or third
     argument is a throw-expression.  */
  if (TREE_CODE (expr) == THROW_EXPR)
    return orig_expr;

  /* Don't wrap an initializer list, we need to be able to look inside.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (expr))
    return orig_expr;

  /* Don't wrap a dummy object, we need to be able to test for it.  */
  if (is_dummy_object (expr))
    return orig_expr;

  /* For ?: and comma, wrap the operands rather than the whole
     expression so later type analysis can still see the structure.  */
  if (TREE_CODE (expr) == COND_EXPR)
    return build3 (COND_EXPR,
		   TREE_TYPE (expr),
		   build_non_dependent_expr (TREE_OPERAND (expr, 0)),
		   (TREE_OPERAND (expr, 1)
		    ? build_non_dependent_expr (TREE_OPERAND (expr, 1))
		    : build_non_dependent_expr (TREE_OPERAND (expr, 0))),
		   build_non_dependent_expr (TREE_OPERAND (expr, 2)));
  if (TREE_CODE (expr) == COMPOUND_EXPR
      && !COMPOUND_EXPR_OVERLOADED (expr))
    return build2 (COMPOUND_EXPR,
		   TREE_TYPE (expr),
		   TREE_OPERAND (expr, 0),
		   build_non_dependent_expr (TREE_OPERAND (expr, 1)));

  /* If the type is unknown, it can't really be non-dependent */
  gcc_assert (TREE_TYPE (expr) != unknown_type_node);

  /* Otherwise, build a NON_DEPENDENT_EXPR.  */
  return build1_loc (EXPR_LOCATION (orig_expr), NON_DEPENDENT_EXPR,
		     TREE_TYPE (expr), expr);
}

/* ARGS is a vector of expressions as arguments to a function call.
   Replace the arguments with equivalent non-dependent expressions.
   This modifies ARGS in place.  */

void
make_args_non_dependent (vec<tree, va_gc> *args)
{
  unsigned int ix;
  tree arg;

  FOR_EACH_VEC_SAFE_ELT (args, ix, arg)
    {
      tree newarg = build_non_dependent_expr (arg);
      if (newarg != arg)
	(*args)[ix] = newarg;
    }
}

/* Returns a type which represents 'auto' or 'decltype(auto)'.  We use a
   TEMPLATE_TYPE_PARM with a level one deeper than the actual template parms.
   If set_canonical is true, we set TYPE_CANONICAL on it.
*/

static tree
make_auto_1 (tree name, bool set_canonical)
{
  tree au = cxx_make_type (TEMPLATE_TYPE_PARM);
  TYPE_NAME (au) = build_decl (input_location, TYPE_DECL, name, au);
  TYPE_STUB_DECL (au) = TYPE_NAME (au);
  TEMPLATE_TYPE_PARM_INDEX (au)
    = build_template_parm_index (0, processing_template_decl + 1,
				 processing_template_decl + 1,
				 TYPE_NAME (au), NULL_TREE);
  if (set_canonical)
    TYPE_CANONICAL (au) = canonical_type_parameter (au);
  DECL_ARTIFICIAL (TYPE_NAME (au)) = 1;
  SET_DECL_TEMPLATE_PARM_P (TYPE_NAME (au));
  /* Distinguish decltype(auto) from plain auto by the name used.  */
  if (name == decltype_auto_identifier)
    AUTO_IS_DECLTYPE (au) = true;

  return au;
}

/* Return a new (canonical) 'decltype(auto)' placeholder type.  */

tree
make_decltype_auto (void)
{
  return make_auto_1 (decltype_auto_identifier, true);
}

/* Return a new (canonical) 'auto' placeholder type.  */

tree
make_auto (void)
{
  return make_auto_1 (auto_identifier, true);
}

/* Return a C++17 deduction placeholder for class template TMPL.  */

tree
make_template_placeholder (tree tmpl)
{
  tree t = make_auto_1 (auto_identifier, false);
  CLASS_PLACEHOLDER_TEMPLATE (t) = tmpl;
  /* Our canonical type depends on the placeholder.  */
  TYPE_CANONICAL (t) = canonical_type_parameter (t);
  return t;
}

/* True iff T is a C++17 class template deduction placeholder.  */

bool
template_placeholder_p (tree t)
{
  return is_auto (t) && CLASS_PLACEHOLDER_TEMPLATE (t);
}

/* Make a "constrained auto" type-specifier.  This is an auto or
   decltype(auto) type with constraints that must be associated after
   deduction.  The constraint is formed from the given concept CON
   and its optional sequence of template arguments ARGS.

   TYPE must be the result of make_auto_type or make_decltype_auto_type.  */

static tree
make_constrained_placeholder_type (tree type, tree con, tree args)
{
  /* Build the constraint.  */
  tree tmpl = DECL_TI_TEMPLATE (con);
  tree expr = tmpl;
  if (TREE_CODE (con) == FUNCTION_DECL)
    expr = ovl_make (tmpl);
  ++processing_template_decl;
  expr = build_concept_check (expr, type, args, tf_warning_or_error);
  --processing_template_decl;

  PLACEHOLDER_TYPE_CONSTRAINTS (type) = expr;

  /* Our canonical type depends on the constraint.  */
  TYPE_CANONICAL (type) = canonical_type_parameter (type);

  /* Attach the constraint to the type declaration.  */
  return TYPE_NAME (type);
}

/* Make a "constrained auto" type-specifier.  */

tree
make_constrained_auto (tree con, tree args)
{
  tree type = make_auto_1 (auto_identifier, false);
  return make_constrained_placeholder_type (type, con, args);
}

/* Make a "constrained decltype(auto)" type-specifier.  */

tree
make_constrained_decltype_auto (tree con, tree args)
{
  tree type = make_auto_1 (decltype_auto_identifier, false);
  return make_constrained_placeholder_type (type, con, args);
}

/* Build and return a concept definition.  Like other templates, the
   CONCEPT_DECL node is wrapped by a TEMPLATE_DECL.  This returns
   the TEMPLATE_DECL.  */

tree
finish_concept_definition (cp_expr id, tree init)
{
  gcc_assert (identifier_p (id));
  gcc_assert (processing_template_decl);

  location_t loc = id.get_location();

  /* A concept-definition shall not have associated constraints.  */
  if (TEMPLATE_PARMS_CONSTRAINTS (current_template_parms))
    {
      error_at (loc, "a concept cannot be constrained");
      TEMPLATE_PARMS_CONSTRAINTS (current_template_parms) = NULL_TREE;
    }

  /* A concept-definition shall appear in namespace scope.  Templates
     aren't allowed in block scope, so we only need to check for class
     scope.  */
  if (TYPE_P (current_scope()) || !DECL_NAMESPACE_SCOPE_P (current_scope ()))
    {
      error_at (loc, "concept %qE not in namespace scope", *id);
      return error_mark_node;
    }

  /* Initially build the concept declaration; its type is bool.  */
  tree decl = build_lang_decl_loc (loc, CONCEPT_DECL, *id, boolean_type_node);
  DECL_CONTEXT (decl) = current_scope ();
  DECL_INITIAL (decl) = init;

  /* Push the enclosing template.  */
  return push_template_decl (decl);
}

/* Given type ARG, return std::initializer_list<ARG>.
*/

static tree
listify (tree arg)
{
  tree std_init_list = get_namespace_binding (std_node, init_list_identifier);

  /* std::initializer_list must be visible; suggest the missing include.  */
  if (!std_init_list || !DECL_CLASS_TEMPLATE_P (std_init_list))
    {
      gcc_rich_location richloc (input_location);
      maybe_add_include_fixit (&richloc, "<initializer_list>", false);
      error_at (&richloc,
		"deducing from brace-enclosed initializer list"
		" requires %<#include <initializer_list>%>");
      return error_mark_node;
    }
  tree argvec = make_tree_vec (1);
  TREE_VEC_ELT (argvec, 0) = arg;

  return lookup_template_class (std_init_list, argvec, NULL_TREE,
				NULL_TREE, 0, tf_warning_or_error);
}

/* Replace auto in TYPE with std::initializer_list<auto>.  */

static tree
listify_autos (tree type, tree auto_node)
{
  tree init_auto = listify (strip_top_quals (auto_node));
  tree argvec = make_tree_vec (1);
  TREE_VEC_ELT (argvec, 0) = init_auto;
  /* In a template, prepend the outer arguments so tsubst sees a full
     argument vector.  */
  if (processing_template_decl)
    argvec = add_to_template_args (current_template_args (), argvec);
  return tsubst (type, argvec, tf_warning_or_error, NULL_TREE);
}

/* Hash traits for hashing possibly constrained 'auto'
   TEMPLATE_TYPE_PARMs for use by do_auto_deduction.  */

struct auto_hash : default_hash_traits<tree>
{
  static inline hashval_t hash (tree);
  static inline bool equal (tree, tree);
};

/* Hash the 'auto' T.  */

inline hashval_t
auto_hash::hash (tree t)
{
  if (tree c = NON_ERROR (PLACEHOLDER_TYPE_CONSTRAINTS (t)))
    /* Matching constrained-type-specifiers denote the same template
       parameter, so hash the constraint.  */
    return hash_placeholder_constraint (c);
  else
    /* But unconstrained autos are all separate, so just hash the pointer.  */
    return iterative_hash_object (t, 0);
}

/* Compare two 'auto's.  */

inline bool
auto_hash::equal (tree t1, tree t2)
{
  if (t1 == t2)
    return true;

  tree c1 = PLACEHOLDER_TYPE_CONSTRAINTS (t1);
  tree c2 = PLACEHOLDER_TYPE_CONSTRAINTS (t2);

  /* Two unconstrained autos are distinct.
*/
  if (!c1 || !c2)
    return false;

  return equivalent_placeholder_constraints (c1, c2);
}

/* for_each_template_parm callback for extract_autos: if t is a (possibly
   constrained) auto, add it to the vector.  */

static int
extract_autos_r (tree t, void *data)
{
  hash_table<auto_hash> &hash = *(hash_table<auto_hash>*)data;
  if (is_auto (t))
    {
      /* All the autos were built with index 0; fix that up now.  */
      tree *p = hash.find_slot (t, INSERT);
      unsigned idx;
      if (*p)
	/* If this is a repeated constrained-type-specifier, use the index
	   we chose before.  */
	idx = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (*p));
      else
	{
	  /* Otherwise this is new, so use the current count.  */
	  *p = t;
	  idx = hash.elements () - 1;
	}
      TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (t)) = idx;
    }

  /* Always keep walking.  */
  return 0;
}

/* Return a TREE_VEC of the 'auto's used in type under the Concepts TS, which
   says they can appear anywhere in the type.  */

static tree
extract_autos (tree type)
{
  hash_set<tree> visited;
  hash_table<auto_hash> hash (2);

  for_each_template_parm (type, extract_autos_r, &hash, &visited, true);

  /* extract_autos_r assigned each auto a unique index; place each one in
     the result vector at that index.  */
  tree tree_vec = make_tree_vec (hash.elements());
  for (hash_table<auto_hash>::iterator iter = hash.begin();
       iter != hash.end(); ++iter)
    {
      tree elt = *iter;
      unsigned i = TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (elt));
      TREE_VEC_ELT (tree_vec, i)
	= build_tree_list (NULL_TREE, TYPE_NAME (elt));
    }

  return tree_vec;
}

/* The stem for deduction guide names.  */
const char *const dguide_base = "__dguide_";

/* Return the name for a deduction guide for class template TMPL.  */

tree
dguide_name (tree tmpl)
{
  tree type = (TYPE_P (tmpl) ?
tmpl : TREE_TYPE (tmpl));
  tree tname = TYPE_IDENTIFIER (type);
  /* Allocate dguide_base + identifier + NUL; the second memcpy copies
     IDENTIFIER_LENGTH + 1 bytes, i.e. including the terminating NUL.  */
  char *buf = (char *) alloca (1 + strlen (dguide_base)
			       + IDENTIFIER_LENGTH (tname));
  memcpy (buf, dguide_base, strlen (dguide_base));
  memcpy (buf + strlen (dguide_base), IDENTIFIER_POINTER (tname),
	  IDENTIFIER_LENGTH (tname) + 1);
  tree dname = get_identifier (buf);
  /* Record the guided class template's type on the identifier; this is
     what dguide_name_p checks for.  */
  TREE_TYPE (dname) = type;
  return dname;
}

/* True if NAME is the name of a deduction guide.  */

bool
dguide_name_p (tree name)
{
  return (TREE_CODE (name) == IDENTIFIER_NODE
	  && TREE_TYPE (name)
	  && !strncmp (IDENTIFIER_POINTER (name), dguide_base,
		       strlen (dguide_base)));
}

/* True if FN is a deduction guide.  */

bool
deduction_guide_p (const_tree fn)
{
  if (DECL_P (fn))
    if (tree name = DECL_NAME (fn))
      return dguide_name_p (name);
  return false;
}

/* True if FN is the copy deduction guide, i.e. A(A)->A.  */

bool
copy_guide_p (const_tree fn)
{
  gcc_assert (deduction_guide_p (fn));
  if (!DECL_ARTIFICIAL (fn))
    return false;
  /* The copy guide takes exactly one parameter, of the guided class type
     (which dguide_name stashed in TREE_TYPE of the guide's name).  */
  tree parms = FUNCTION_FIRST_USER_PARMTYPE (DECL_TI_TEMPLATE (fn));
  return (TREE_CHAIN (parms) == void_list_node
	  && same_type_p (TREE_VALUE (parms), TREE_TYPE (DECL_NAME (fn))));
}

/* True if FN is a guide generated from a constructor template.  */

bool
template_guide_p (const_tree fn)
{
  gcc_assert (deduction_guide_p (fn));
  if (!DECL_ARTIFICIAL (fn))
    return false;
  tree tmpl = DECL_TI_TEMPLATE (fn);
  if (tree org = DECL_ABSTRACT_ORIGIN (tmpl))
    return PRIMARY_TEMPLATE_P (org);
  return false;
}

/* True if FN is an aggregate initialization guide or the copy deduction
   guide.  */

bool
builtin_guide_p (const_tree fn)
{
  if (!deduction_guide_p (fn))
    return false;
  if (!DECL_ARTIFICIAL (fn))
    /* Explicitly declared.  */
    return false;
  if (DECL_ABSTRACT_ORIGIN (fn))
    /* Derived from a constructor.  */
    return false;
  return true;
}

/* OLDDECL is a _DECL for a template parameter.  Return a similar parameter at
   LEVEL:INDEX, using tsubst_args and complain for substitution into non-type
   template parameter types.
   Note that the handling of template template parameters relies on
   current_template_parms being set appropriately for the new template.  */

static tree
rewrite_template_parm (tree olddecl, unsigned index, unsigned level,
		       tree tsubst_args, tsubst_flags_t complain)
{
  if (olddecl == error_mark_node)
    return error_mark_node;

  tree oldidx = get_template_parm_index (olddecl);

  /* First build the new parameter's type.  Type and template-template
     parameters get a fresh type node; non-type parameters have their
     declared type substituted.  */
  tree newtype;
  if (TREE_CODE (olddecl) == TYPE_DECL
      || TREE_CODE (olddecl) == TEMPLATE_DECL)
    {
      tree oldtype = TREE_TYPE (olddecl);
      newtype = cxx_make_type (TREE_CODE (oldtype));
      TYPE_MAIN_VARIANT (newtype) = newtype;
      if (TREE_CODE (oldtype) == TEMPLATE_TYPE_PARM)
	TEMPLATE_TYPE_PARM_FOR_CLASS (newtype)
	  = TEMPLATE_TYPE_PARM_FOR_CLASS (oldtype);
    }
  else
    {
      newtype = TREE_TYPE (olddecl);
      if (type_uses_auto (newtype))
	{
	  // Substitute once to fix references to other template parameters.
	  newtype = tsubst (newtype, tsubst_args,
			    complain|tf_partial, NULL_TREE);
	  // Now substitute again to reduce the level of the auto.
	  newtype = tsubst (newtype, current_template_args (),
			    complain, NULL_TREE);
	}
      else
	newtype = tsubst (newtype, tsubst_args,
			  complain, NULL_TREE);
    }

  tree newdecl
    = build_decl (DECL_SOURCE_LOCATION (olddecl), TREE_CODE (olddecl),
		  DECL_NAME (olddecl), newtype);
  SET_DECL_TEMPLATE_PARM_P (newdecl);

  tree newidx;
  if (TREE_CODE (olddecl) == TYPE_DECL
      || TREE_CODE (olddecl) == TEMPLATE_DECL)
    {
      newidx = TEMPLATE_TYPE_PARM_INDEX (newtype)
	= build_template_parm_index (index, level, level,
				     newdecl, newtype);
      TEMPLATE_PARM_PARAMETER_PACK (newidx)
	= TEMPLATE_PARM_PARAMETER_PACK (oldidx);
      TYPE_STUB_DECL (newtype) = TYPE_NAME (newtype) = newdecl;

      if (TYPE_STRUCTURAL_EQUALITY_P (TREE_TYPE (olddecl)))
	SET_TYPE_STRUCTURAL_EQUALITY (newtype);
      else
	TYPE_CANONICAL (newtype) = canonical_type_parameter (newtype);

      if (TREE_CODE (olddecl) == TEMPLATE_DECL)
	{
	  /* A template template parameter also needs its own (rewritten)
	     inner template parameter list.  */
	  DECL_TEMPLATE_RESULT (newdecl)
	    = build_decl (DECL_SOURCE_LOCATION (olddecl), TYPE_DECL,
			  DECL_NAME (olddecl), newtype);
	  DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (newdecl)) = true;
	  // First create a copy (ttargs) of tsubst_args with an
	  // additional level for the template template parameter's own
	  // template parameters (ttparms).
	  tree ttparms = (INNERMOST_TEMPLATE_PARMS
			  (DECL_TEMPLATE_PARMS (olddecl)));
	  const int depth = TMPL_ARGS_DEPTH (tsubst_args);
	  tree ttargs = make_tree_vec (depth + 1);
	  for (int i = 0; i < depth; ++i)
	    TREE_VEC_ELT (ttargs, i) = TREE_VEC_ELT (tsubst_args, i);
	  TREE_VEC_ELT (ttargs, depth)
	    = template_parms_level_to_args (ttparms);
	  // Substitute ttargs into ttparms to fix references to
	  // other template parameters.
	  ttparms = tsubst_template_parms_level (ttparms, ttargs,
						 complain|tf_partial);
	  // Now substitute again with args based on tparms, to reduce
	  // the level of the ttparms.
	  ttargs = current_template_args ();
	  ttparms = tsubst_template_parms_level (ttparms, ttargs,
						 complain);
	  // Finally, tack the adjusted parms onto tparms.
	  ttparms = tree_cons (size_int (depth), ttparms,
			       current_template_parms);
	  DECL_TEMPLATE_PARMS (newdecl) = ttparms;
	}
    }
  else
    {
      /* Non-type parameter: build the CONST_DECL/PARM_DECL pair sharing
	 the new index.  */
      tree oldconst = TEMPLATE_PARM_DECL (oldidx);
      tree newconst
	= build_decl (DECL_SOURCE_LOCATION (oldconst),
		      TREE_CODE (oldconst),
		      DECL_NAME (oldconst), newtype);
      TREE_CONSTANT (newconst) = TREE_CONSTANT (newdecl)
	= TREE_READONLY (newconst) = TREE_READONLY (newdecl) = true;
      SET_DECL_TEMPLATE_PARM_P (newconst);
      newidx = build_template_parm_index (index, level, level,
					  newconst, newtype);
      TEMPLATE_PARM_PARAMETER_PACK (newidx)
	= TEMPLATE_PARM_PARAMETER_PACK (oldidx);
      DECL_INITIAL (newdecl) = DECL_INITIAL (newconst) = newidx;
    }

  return newdecl;
}

/* As rewrite_template_parm, but for the whole TREE_LIST representing a
   template parameter.
*/

static tree
rewrite_tparm_list (tree oldelt, unsigned index, unsigned level,
		    tree targs, unsigned targs_index, tsubst_flags_t complain)
{
  tree olddecl = TREE_VALUE (oldelt);
  tree newdecl = rewrite_template_parm (olddecl, index, level,
					targs, complain);
  if (newdecl == error_mark_node)
    return error_mark_node;
  /* Substitute the default argument (TREE_PURPOSE) as well.  */
  tree newdef = tsubst_template_arg (TREE_PURPOSE (oldelt), targs,
				     complain, NULL_TREE);
  tree list = build_tree_list (newdef, newdecl);
  TEMPLATE_PARM_CONSTRAINTS (list)
    = tsubst_constraint_info (TEMPLATE_PARM_CONSTRAINTS (oldelt),
			      targs, complain, NULL_TREE);
  /* Record the rewritten parm as the argument for the old parm, so later
     parameters referring to it get the new one.  */
  int depth = TMPL_ARGS_DEPTH (targs);
  TMPL_ARG (targs, depth, targs_index) = template_parm_to_arg (list);
  return list;
}

/* Returns a C++17 class deduction guide template based on the constructor
   CTOR.  As a special case, CTOR can be a RECORD_TYPE for an implicit default
   guide, REFERENCE_TYPE for an implicit copy/move guide, or TREE_LIST for an
   aggregate initialization guide.  */

static tree
build_deduction_guide (tree type, tree ctor, tree outer_args, tsubst_flags_t complain)
{
  tree tparms, targs, fparms, fargs, ci;
  bool memtmpl = false;
  bool explicit_p;
  location_t loc;
  tree fn_tmpl = NULL_TREE;

  if (outer_args)
    {
      ++processing_template_decl;
      type = tsubst (type, outer_args, complain, CLASSTYPE_TI_TEMPLATE (type));
      --processing_template_decl;
    }

  if (!DECL_DECLARES_FUNCTION_P (ctor))
    {
      /* Implicit default/copy guide or aggregate guide (see comment
	 above): synthesize the parameter-type list directly.  */
      if (TYPE_P (ctor))
	{
	  bool copy_p = TYPE_REF_P (ctor);
	  if (copy_p)
	    fparms = tree_cons (NULL_TREE, type, void_list_node);
	  else
	    fparms = void_list_node;
	}
      else if (TREE_CODE (ctor) == TREE_LIST)
	fparms = ctor;
      else
	gcc_unreachable ();

      tree ctmpl = CLASSTYPE_TI_TEMPLATE (type);
      tparms = DECL_TEMPLATE_PARMS (ctmpl);
      targs = CLASSTYPE_TI_ARGS (type);
      ci = NULL_TREE;
      fargs = NULL_TREE;
      loc = DECL_SOURCE_LOCATION (ctmpl);
      explicit_p = false;
    }
  else
    {
      ++processing_template_decl;
      bool ok = true;

      fn_tmpl
	= (TREE_CODE (ctor) == TEMPLATE_DECL ?
ctor : DECL_TI_TEMPLATE (ctor));
      if (outer_args)
	fn_tmpl = tsubst (fn_tmpl, outer_args, complain, ctor);
      ctor = DECL_TEMPLATE_RESULT (fn_tmpl);

      tparms = DECL_TEMPLATE_PARMS (fn_tmpl);
      /* If type is a member class template, DECL_TI_ARGS (ctor) will have
	 fully specialized args for the enclosing class.  Strip those off, as
	 the deduction guide won't have those template parameters.  */
      targs = get_innermost_template_args (DECL_TI_ARGS (ctor),
					   TMPL_PARMS_DEPTH (tparms));
      /* Discard the 'this' parameter.  */
      fparms = FUNCTION_ARG_CHAIN (ctor);
      fargs = TREE_CHAIN (DECL_ARGUMENTS (ctor));
      ci = get_constraints (ctor);
      loc = DECL_SOURCE_LOCATION (ctor);
      explicit_p = DECL_NONCONVERTING_P (ctor);

      if (PRIMARY_TEMPLATE_P (fn_tmpl))
	{
	  memtmpl = true;

	  /* For a member template constructor, we need to flatten the two
	     template parameter lists into one, and then adjust the function
	     signature accordingly.  This gets...complicated.  */
	  tree save_parms = current_template_parms;

	  /* For a member template we should have two levels of parms/args, one
	     for the class and one for the constructor.  We stripped
	     specialized args for further enclosing classes above.  */
	  const int depth = 2;
	  gcc_assert (TMPL_ARGS_DEPTH (targs) == depth);

	  /* Template args for translating references to the two-level template
	     parameters into references to the one-level template parameters we
	     are creating.  */
	  tree tsubst_args = copy_node (targs);
	  TMPL_ARGS_LEVEL (tsubst_args, depth)
	    = copy_node (TMPL_ARGS_LEVEL (tsubst_args, depth));

	  /* Template parms for the constructor template.  */
	  tree ftparms = TREE_VALUE (tparms);
	  unsigned flen = TREE_VEC_LENGTH (ftparms);
	  /* Template parms for the class template.  */
	  tparms = TREE_CHAIN (tparms);
	  tree ctparms = TREE_VALUE (tparms);
	  unsigned clen = TREE_VEC_LENGTH (ctparms);
	  /* Template parms for the deduction guide start as a copy of the
	     template parms for the class.  We set current_template_parms for
	     lookup_template_class_1.
	     */
	  current_template_parms = tparms = copy_node (tparms);
	  tree new_vec = TREE_VALUE (tparms) = make_tree_vec (flen + clen);
	  for (unsigned i = 0; i < clen; ++i)
	    TREE_VEC_ELT (new_vec, i) = TREE_VEC_ELT (ctparms, i);

	  /* Now we need to rewrite the constructor parms to append them to the
	     class parms.  */
	  for (unsigned i = 0; i < flen; ++i)
	    {
	      unsigned index = i + clen;
	      unsigned level = 1;
	      tree oldelt = TREE_VEC_ELT (ftparms, i);
	      tree newelt
		= rewrite_tparm_list (oldelt, index, level,
				      tsubst_args, i, complain);
	      if (newelt == error_mark_node)
		ok = false;
	      TREE_VEC_ELT (new_vec, index) = newelt;
	    }

	  /* Now we have a final set of template parms to substitute into the
	     function signature.  */
	  targs = template_parms_to_args (tparms);
	  fparms = tsubst_arg_types (fparms, tsubst_args, NULL_TREE,
				     complain, ctor);
	  if (fparms == error_mark_node)
	    ok = false;
	  if (ci)
	    ci = tsubst_constraint_info (ci, tsubst_args, complain, ctor);

	  /* Parms are to have DECL_CHAIN tsubsted, which would be skipped if
	     cp_unevaluated_operand.  */
	  cp_evaluated ev;
	  fargs = tsubst (fargs, tsubst_args, complain, ctor);
	  current_template_parms = save_parms;
	}
      else
	{
	  /* Substitute in the same arguments to rewrite class members into
	     references to members of an unknown specialization.  */
	  cp_evaluated ev;
	  fparms = tsubst_arg_types (fparms, targs, NULL_TREE, complain, ctor);
	  fargs = tsubst (fargs, targs, complain, ctor);
	  if (ci)
	    ci = tsubst_constraint_info (ci, targs, complain, ctor);
	}

      --processing_template_decl;
      if (!ok)
	return error_mark_node;
    }

  if (!memtmpl)
    {
      /* Copy the parms so we can set DECL_PRIMARY_TEMPLATE.
	 */
      tparms = copy_node (tparms);
      INNERMOST_TEMPLATE_PARMS (tparms)
	= copy_node (INNERMOST_TEMPLATE_PARMS (tparms));
    }

  /* Build the guide function and wrap it in a TEMPLATE_DECL.  */
  tree fntype = build_function_type (type, fparms);
  tree ded_fn = build_lang_decl_loc (loc,
				     FUNCTION_DECL,
				     dguide_name (type), fntype);
  DECL_ARGUMENTS (ded_fn) = fargs;
  DECL_ARTIFICIAL (ded_fn) = true;
  DECL_NONCONVERTING_P (ded_fn) = explicit_p;
  tree ded_tmpl = build_template_decl (ded_fn, tparms, /*member*/false);
  DECL_ARTIFICIAL (ded_tmpl) = true;
  DECL_TEMPLATE_INFO (ded_fn) = build_template_info (ded_tmpl, targs);
  DECL_PRIMARY_TEMPLATE (ded_tmpl) = ded_tmpl;
  if (DECL_P (ctor))
    DECL_ABSTRACT_ORIGIN (ded_tmpl) = fn_tmpl;
  if (ci)
    set_constraints (ded_tmpl, ci);

  return ded_tmpl;
}

/* Add to LIST the member types for the reshaped initializer CTOR.  */

static tree
collect_ctor_idx_types (tree ctor, tree list, tree elt = NULL_TREE)
{
  vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (ctor);
  tree idx, val;
  unsigned i;
  FOR_EACH_CONSTRUCTOR_ELT (v, i, idx, val)
    {
      tree ftype = elt ? elt : TREE_TYPE (idx);
      if (BRACE_ENCLOSED_INITIALIZER_P (val)
	  && CONSTRUCTOR_NELTS (val)
	  /* As in reshape_init_r, a non-aggregate or array-of-dependent-bound
	     type gets a single initializer.  */
	  && CP_AGGREGATE_TYPE_P (ftype)
	  && !(TREE_CODE (ftype) == ARRAY_TYPE
	       && uses_template_parms (TYPE_DOMAIN (ftype))))
	{
	  /* Recurse into nested aggregate initializers.  */
	  tree subelt = NULL_TREE;
	  if (TREE_CODE (ftype) == ARRAY_TYPE)
	    subelt = TREE_TYPE (ftype);
	  list = collect_ctor_idx_types (val, list, subelt);
	  continue;
	}
      tree arg = NULL_TREE;
      if (i == v->length() - 1
	  && PACK_EXPANSION_P (ftype))
	/* Give the trailing pack expansion parameter a default argument to
	   match aggregate initialization behavior, even if we deduce the
	   length of the pack separately to more than we have initializers.
*/ arg = build_constructor (init_list_type_node, NULL); /* if ei is of array type and xi is a braced-init-list or string literal, Ti is an rvalue reference to the declared type of ei */ STRIP_ANY_LOCATION_WRAPPER (val); if (TREE_CODE (ftype) == ARRAY_TYPE && (BRACE_ENCLOSED_INITIALIZER_P (val) || TREE_CODE (val) == STRING_CST)) { if (TREE_CODE (val) == STRING_CST) ftype = cp_build_qualified_type (ftype, cp_type_quals (ftype) | TYPE_QUAL_CONST); ftype = (cp_build_reference_type (ftype, BRACE_ENCLOSED_INITIALIZER_P (val))); } list = tree_cons (arg, ftype, list); } return list; } /* Return whether ETYPE is, or is derived from, a specialization of TMPL. */ static bool is_spec_or_derived (tree etype, tree tmpl) { if (!etype || !CLASS_TYPE_P (etype)) return false; tree type = TREE_TYPE (tmpl); tree tparms = (INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl))); tree targs = make_tree_vec (TREE_VEC_LENGTH (tparms)); int err = unify (tparms, targs, type, etype, UNIFY_ALLOW_DERIVED, /*explain*/false); ggc_free (targs); return !err; } /* Return a C++20 aggregate deduction candidate for TYPE initialized from INIT. */ static tree maybe_aggr_guide (tree tmpl, tree init, vec<tree,va_gc> *args) { if (cxx_dialect < cxx20) return NULL_TREE; if (init == NULL_TREE) return NULL_TREE; tree type = TREE_TYPE (tmpl); if (!CP_AGGREGATE_TYPE_P (type)) return NULL_TREE; /* No aggregate candidate for copy-initialization. */ if (args->length() == 1) { tree val = (*args)[0]; if (is_spec_or_derived (tmpl, TREE_TYPE (val))) return NULL_TREE; } /* If we encounter a problem, we just won't add the candidate. 
*/
  tsubst_flags_t complain = tf_none;
  tree parms = NULL_TREE;
  if (BRACE_ENCLOSED_INITIALIZER_P (init))
    {
      /* Reshape the braced initializer first so nested aggregates are in
	 canonical form before collecting element types.  */
      init = reshape_init (type, init, complain);
      if (init == error_mark_node)
	return NULL_TREE;
      parms = collect_ctor_idx_types (init, parms);
    }
  else if (TREE_CODE (init) == TREE_LIST)
    {
      /* Parenthesized initialization: take the declared type of each
	 initializable field, one per initializer.  */
      int len = list_length (init);
      for (tree field = TYPE_FIELDS (type);
	   len;
	   --len, field = DECL_CHAIN (field))
	{
	  field = next_initializable_field (field);
	  if (!field)
	    return NULL_TREE;
	  tree ftype = finish_decltype_type (field, true, complain);
	  parms = tree_cons (NULL_TREE, ftype, parms);
	}
    }
  else
    /* Aggregate initialization doesn't apply to an initializer expression.  */
    return NULL_TREE;

  if (parms)
    {
      /* Reverse the accumulated list and terminate it, then build the
	 guide from it (TREE_LIST case of build_deduction_guide).  */
      tree last = parms;
      parms = nreverse (parms);
      TREE_CHAIN (last) = void_list_node;
      tree guide = build_deduction_guide (type, parms, NULL_TREE, complain);
      return guide;
    }

  return NULL_TREE;
}

/* UGUIDES are the deduction guides for the underlying template of alias
   template TMPL; adjust them to be deduction guides for TMPL.  */

static tree
alias_ctad_tweaks (tree tmpl, tree uguides)
{
  /* [over.match.class.deduct]: When resolving a placeholder for a deduced
     class type (9.2.8.2) where the template-name names an alias template A,
     the defining-type-id of A must be of the form

     typename(opt) nested-name-specifier(opt) template(opt) simple-template-id

     as specified in 9.2.8.2.  The guides of A are the set of functions or
     function templates formed as follows.  For each function or function
     template f in the guides of the template named by the simple-template-id
     of the defining-type-id, the template arguments of the return type of f
     are deduced from the defining-type-id of A according to the process in
     13.10.2.5 with the exception that deduction does not fail if not all
     template arguments are deduced.  Let g denote the result of substituting
     these deductions into f.  If substitution succeeds, form a function or
     function template f' with the following properties and add it to the set
     of guides of A:

     * The function type of f' is the function type of g.

     * If f is a function template, f' is a function template whose template
     parameter list consists of all the template parameters of A (including
     their default template arguments) that appear in the above deductions or
     (recursively) in their default template arguments, followed by the
     template parameters of f that were not deduced (including their default
     template arguments), otherwise f' is not a function template.

     * The associated constraints (13.5.2) are the conjunction of the
     associated constraints of g and a constraint that is satisfied if and only
     if the arguments of A are deducible (see below) from the return type.

     * If f is a copy deduction candidate (12.4.1.8), then f' is considered to
     be so as well.

     * If f was generated from a deduction-guide (12.4.1.8), then f' is
     considered to be so as well.

     * The explicit-specifier of f' is the explicit-specifier of g (if
     any).  */

  /* This implementation differs from the above in two significant ways:

     1) We include all template parameters of A, not just some.
     2) The added constraint is same_type instead of deducible.

     I believe that while it's probably possible to construct a testcase that
     behaves differently with this simplification, it should have the same
     effect for real uses.  Including all template parameters means that we
     deduce all parameters of A when resolving the call, so when we're in the
     constraint we don't need to deduce them again, we can just check whether
     the deduction produced the desired result.
*/
  tsubst_flags_t complain = tf_warning_or_error;
  tree atype = TREE_TYPE (tmpl);
  tree aguides = NULL_TREE;
  tree atparms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (tmpl));
  unsigned natparms = TREE_VEC_LENGTH (atparms);
  tree utype = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
  for (ovl_iterator iter (uguides); iter; ++iter)
    {
      tree f = *iter;
      tree in_decl = f;
      location_t loc = DECL_SOURCE_LOCATION (f);
      tree ret = TREE_TYPE (TREE_TYPE (f));
      tree fprime = f;
      if (TREE_CODE (f) == TEMPLATE_DECL)
	{
	  processing_template_decl_sentinel ptds (/*reset*/false);
	  ++processing_template_decl;

	  /* Deduce template arguments for f from the type-id of A.  */
	  tree ftparms = INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (f));
	  unsigned len = TREE_VEC_LENGTH (ftparms);
	  tree targs = make_tree_vec (len);
	  int err = unify (ftparms, targs, ret, utype, UNIFY_ALLOW_NONE, false);
	  gcc_assert (!err);

	  /* The number of parms for f' is the number of parms for A plus
	     non-deduced parms of f.  */
	  unsigned ndlen = 0;
	  unsigned j;
	  for (unsigned i = 0; i < len; ++i)
	    if (TREE_VEC_ELT (targs, i) == NULL_TREE)
	      ++ndlen;
	  tree gtparms = make_tree_vec (natparms + ndlen);

	  /* First copy over the parms of A.  */
	  for (j = 0; j < natparms; ++j)
	    TREE_VEC_ELT (gtparms, j) = TREE_VEC_ELT (atparms, j);
	  /* Now rewrite the non-deduced parms of f.  The ndlen guard lets
	     the loop stop early once all of them have been handled.  */
	  for (unsigned i = 0; ndlen && i < len; ++i)
	    if (TREE_VEC_ELT (targs, i) == NULL_TREE)
	      {
		--ndlen;
		unsigned index = j++;
		unsigned level = 1;
		tree oldlist = TREE_VEC_ELT (ftparms, i);
		tree list = rewrite_tparm_list (oldlist, index, level,
						targs, i, complain);
		TREE_VEC_ELT (gtparms, index) = list;
	      }
	  gtparms = build_tree_list (size_one_node, gtparms);

	  /* Substitute the deduced arguments plus the rewritten template
	     parameters into f to get g.  This covers the type, copyness,
	     guideness, and explicit-specifier.
	     */
	  tree g = tsubst_decl (DECL_TEMPLATE_RESULT (f), targs, complain);
	  if (g == error_mark_node)
	    return error_mark_node;
	  DECL_USE_TEMPLATE (g) = 0;
	  fprime = build_template_decl (g, gtparms, false);
	  DECL_TEMPLATE_RESULT (fprime) = g;
	  TREE_TYPE (fprime) = TREE_TYPE (g);
	  tree gtargs = template_parms_to_args (gtparms);
	  DECL_TEMPLATE_INFO (g) = build_template_info (fprime, gtargs);
	  DECL_PRIMARY_TEMPLATE (fprime) = fprime;

	  /* Substitute the associated constraints.  */
	  tree ci = get_constraints (f);
	  if (ci)
	    ci = tsubst_constraint_info (ci, targs, complain, in_decl);
	  if (ci == error_mark_node)
	    return error_mark_node;

	  /* Add a constraint that the return type matches the instantiation of
	     A with the same template arguments.  */
	  ret = TREE_TYPE (TREE_TYPE (fprime));
	  if (!same_type_p (atype, ret)
	      /* FIXME this should mean they don't compare as equivalent.  */
	      || dependent_alias_template_spec_p (atype, nt_opaque))
	    {
	      tree same = finish_trait_expr (loc, CPTK_IS_SAME_AS, atype, ret);
	      ci = append_constraint (ci, same);
	    }

	  if (ci)
	    {
	      remove_constraints (fprime);
	      set_constraints (fprime, ci);
	    }
	}
      else
	{
	  /* For a non-template deduction guide, if the arguments of A aren't
	     deducible from the return type, don't add the candidate.  */
	  tree targs = make_tree_vec (natparms);
	  int err = unify (atparms, targs, utype, ret, UNIFY_ALLOW_NONE, false);
	  /* Require every parameter of A to have been deduced.  */
	  for (unsigned i = 0; !err && i < natparms; ++i)
	    if (TREE_VEC_ELT (targs, i) == NULL_TREE)
	      err = true;
	  if (err)
	    continue;
	}

      aguides = lookup_add (fprime, aguides);
    }

  return aguides;
}

/* Return artificial deduction guides built from the constructors of class
   template TMPL.
*/

static tree
ctor_deduction_guides_for (tree tmpl, tsubst_flags_t complain)
{
  tree type = TREE_TYPE (tmpl);
  tree outer_args = NULL_TREE;
  /* For a member class template of an instantiated class, work from the
     most general template and remember the enclosing args.  */
  if (DECL_CLASS_SCOPE_P (tmpl)
      && CLASSTYPE_TEMPLATE_INSTANTIATION (DECL_CONTEXT (tmpl)))
    {
      outer_args = CLASSTYPE_TI_ARGS (DECL_CONTEXT (tmpl));
      type = TREE_TYPE (most_general_template (tmpl));
    }

  tree cands = NULL_TREE;

  for (ovl_iterator iter (CLASSTYPE_CONSTRUCTORS (type)); iter; ++iter)
    {
      /* Skip inherited constructors.  */
      if (iter.using_p ())
	continue;

      tree guide = build_deduction_guide (type, *iter, outer_args, complain);
      cands = lookup_add (guide, cands);
    }

  /* Add implicit default constructor deduction guide.  */
  if (!TYPE_HAS_USER_CONSTRUCTOR (type))
    {
      tree guide = build_deduction_guide (type, type, outer_args,
					  complain);
      cands = lookup_add (guide, cands);
    }

  /* Add copy guide.  */
  {
    tree gtype = build_reference_type (type);
    tree guide = build_deduction_guide (type, gtype, outer_args,
					complain);
    cands = lookup_add (guide, cands);
  }

  return cands;
}

static GTY((deletable)) hash_map<tree, tree_pair_p> *dguide_cache;

/* Return the non-aggregate deduction guides for deducible template TMPL.  The
   aggregate candidate is added separately because it depends on the
   initializer.  Set ANY_DGUIDES_P if we find a non-implicit deduction
   guide.  */

static tree
deduction_guides_for (tree tmpl, bool &any_dguides_p, tsubst_flags_t complain)
{
  tree guides = NULL_TREE;
  if (DECL_ALIAS_TEMPLATE_P (tmpl))
    {
      /* For an alias template, start from the guides of the underlying
	 template; they are adjusted by alias_ctad_tweaks below.  */
      tree under = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
      tree tinfo = get_template_info (under);
      guides = deduction_guides_for (TI_TEMPLATE (tinfo), any_dguides_p,
				     complain);
    }
  else
    {
      guides = lookup_qualified_name (CP_DECL_CONTEXT (tmpl),
				      dguide_name (tmpl),
				      LOOK_want::NORMAL, /*complain*/false);
      if (guides == error_mark_node)
	guides = NULL_TREE;
      else
	any_dguides_p = true;
    }

  /* Cache the deduction guides for a template.  We also remember the result
     of lookup, and rebuild everything if it changes; should be very rare.
     */
  tree_pair_p cache = NULL;
  if (tree_pair_p &r
      = hash_map_safe_get_or_insert<hm_ggc> (dguide_cache, tmpl))
    {
      cache = r;
      /* PURPOSE records the lookup result the cached VALUE was built
	 from; reuse only if it is unchanged.  */
      if (cache->purpose == guides)
	return cache->value;
    }
  else
    {
      r = cache = ggc_cleared_alloc<tree_pair_s> ();
      cache->purpose = guides;
    }

  tree cands = NULL_TREE;

  if (DECL_ALIAS_TEMPLATE_P (tmpl))
    cands = alias_ctad_tweaks (tmpl, guides);
  else
    {
      /* Implicit (constructor-derived) guides plus any user-declared
	 deduction guides found by lookup.  */
      cands = ctor_deduction_guides_for (tmpl, complain);
      for (ovl_iterator it (guides); it; ++it)
	cands = lookup_add (*it, cands);
    }

  cache->value = cands;
  return cands;
}

/* Return whether TMPL is a (class template argument-) deducible template.  */

bool
ctad_template_p (tree tmpl)
{
  /* A deducible template is either a class template or is an alias template
     whose defining-type-id is of the form

      typename(opt) nested-name-specifier(opt) template(opt) simple-template-id

     where the nested-name-specifier (if any) is non-dependent and the
     template-name of the simple-template-id names a deducible template.  */
  if (DECL_CLASS_TEMPLATE_P (tmpl)
      || DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))
    return true;
  if (!DECL_ALIAS_TEMPLATE_P (tmpl))
    return false;
  tree orig = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
  if (tree tinfo = get_template_info (orig))
    return ctad_template_p (TI_TEMPLATE (tinfo));
  return false;
}

/* Deduce template arguments for the class template placeholder PTYPE for
   template TMPL based on the initializer INIT, and return the resulting
   type.  */

static tree
do_class_deduction (tree ptype, tree tmpl, tree init, int flags,
		    tsubst_flags_t complain)
{
  /* We should have handled this in the caller.  */
  if (DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl))
    return ptype;

  /* Look through alias templates that just rename another template.
     */
  tmpl = get_underlying_template (tmpl);

  if (!ctad_template_p (tmpl))
    {
      if (complain & tf_error)
	error ("non-deducible template %qT used without template arguments", tmpl);
      return error_mark_node;
    }
  else if (cxx_dialect < cxx20 && DECL_ALIAS_TEMPLATE_P (tmpl))
    {
      /* This doesn't affect conforming C++17 code, so just pedwarn.  */
      if (complain & tf_warning_or_error)
	pedwarn (input_location, 0, "alias template deduction only available "
		 "with %<-std=c++20%> or %<-std=gnu++20%>");
    }

  if (init && TREE_TYPE (init) == ptype)
    /* Using the template parm as its own argument.  */
    return ptype;

  tree type = TREE_TYPE (tmpl);

  bool try_list_ctor = false;
  bool list_init_p = false;

  /* Build the argument vector for overload resolution from INIT.  */
  releasing_vec rv_args = NULL;
  vec<tree,va_gc> *&args = *&rv_args;
  if (init == NULL_TREE)
    args = make_tree_vector ();
  else if (BRACE_ENCLOSED_INITIALIZER_P (init))
    {
      list_init_p = true;
      try_list_ctor = TYPE_HAS_LIST_CTOR (type);
      if (try_list_ctor && CONSTRUCTOR_NELTS (init) == 1)
	{
	  /* As an exception, the first phase in 16.3.1.7 (considering the
	     initializer list as a single argument) is omitted if the
	     initializer list consists of a single expression of type cv U,
	     where U is a specialization of C or a class derived from a
	     specialization of C.  */
	  tree elt = CONSTRUCTOR_ELT (init, 0)->value;
	  if (is_spec_or_derived (TREE_TYPE (elt), tmpl))
	    try_list_ctor = false;
	}
      if (try_list_ctor || is_std_init_list (type))
	args = make_tree_vector_single (init);
      else
	args = make_tree_vector_from_ctor (init);
    }
  else if (TREE_CODE (init) == TREE_LIST)
    args = make_tree_vector_from_list (init);
  else
    args = make_tree_vector_single (init);

  /* Do this now to avoid problems with erroneous args later on.  */
  args = resolve_args (args, complain);
  if (args == NULL)
    return error_mark_node;

  bool any_dguides_p = false;
  tree cands = deduction_guides_for (tmpl, any_dguides_p, complain);
  if (cands == error_mark_node)
    return error_mark_node;

  /* Prune explicit deduction guides in copy-initialization context (but
     not copy-list-initialization).
     */
  bool elided = false;
  if (!list_init_p && (flags & LOOKUP_ONLYCONVERTING))
    {
      for (lkp_iterator iter (cands); !elided && iter; ++iter)
	if (DECL_NONCONVERTING_P (STRIP_TEMPLATE (*iter)))
	  elided = true;

      if (elided)
	{
	  /* Found a nonconverting guide, prune the candidates.  */
	  tree pruned = NULL_TREE;
	  for (lkp_iterator iter (cands); iter; ++iter)
	    if (!DECL_NONCONVERTING_P (STRIP_TEMPLATE (*iter)))
	      pruned = lookup_add (*iter, pruned);

	  cands = pruned;
	}
    }

  if (tree guide = maybe_aggr_guide (tmpl, init, args))
    cands = lookup_add (guide, cands);

  tree call = error_mark_node;

  /* If this is list-initialization and the class has a list constructor, first
     try deducing from the list as a single argument, as [over.match.list].  */
  tree list_cands = NULL_TREE;
  if (try_list_ctor && cands)
    for (lkp_iterator iter (cands); iter; ++iter)
      {
	tree dg = *iter;
	if (is_list_ctor (dg))
	  list_cands = lookup_add (dg, list_cands);
      }
  if (list_cands)
    {
      ++cp_unevaluated_operand;
      call = build_new_function_call (list_cands, &args, tf_decltype);
      --cp_unevaluated_operand;

      if (call == error_mark_node)
	{
	  /* That didn't work, now try treating the list as a sequence of
	     arguments.
	     */
	  release_tree_vector (args);
	  args = make_tree_vector_from_ctor (init);
	}
    }

  if (elided && !cands)
    {
      error ("cannot deduce template arguments for copy-initialization"
	     " of %qT, as it has no non-explicit deduction guides or "
	     "user-declared constructors", type);
      return error_mark_node;
    }
  else if (!cands && call == error_mark_node)
    {
      error ("cannot deduce template arguments of %qT, as it has no viable "
	     "deduction guides", type);
      return error_mark_node;
    }

  if (call == error_mark_node)
    {
      ++cp_unevaluated_operand;
      call = build_new_function_call (cands, &args, tf_decltype);
      --cp_unevaluated_operand;
    }

  if (call == error_mark_node)
    {
      if (complain & tf_warning_or_error)
	{
	  error ("class template argument deduction failed:");

	  /* Repeat the call with COMPLAIN to explain the failure.  */
	  ++cp_unevaluated_operand;
	  call = build_new_function_call (cands, &args,
					  complain | tf_decltype);
	  --cp_unevaluated_operand;

	  if (elided)
	    inform (input_location, "explicit deduction guides not considered "
		    "for copy-initialization");
	}
      return error_mark_node;
    }
  /* [over.match.list]/1: In copy-list-initialization, if an explicit
     constructor is chosen, the initialization is ill-formed.  */
  else if (flags & LOOKUP_ONLYCONVERTING)
    {
      tree fndecl = cp_get_callee_fndecl_nofold (call);
      if (fndecl && DECL_NONCONVERTING_P (fndecl))
	{
	  if (complain & tf_warning_or_error)
	    {
	      // TODO: Pass down location from cp_finish_decl.
	      error ("class template argument deduction for %qT failed: "
		     "explicit deduction guide selected in "
		     "copy-list-initialization", type);
	      inform (DECL_SOURCE_LOCATION (fndecl),
		      "explicit deduction guide declared here");

	    }
	  return error_mark_node;
	}
    }

  /* If CTAD succeeded but the type doesn't have any explicit deduction
     guides, this deduction might not be what the user intended.
     */
  if (call != error_mark_node && !any_dguides_p)
    {
      tree fndecl = cp_get_callee_fndecl_nofold (call);
      /* Suppress the warning for system-header templates unless the user
	 asked for warnings in system headers.  */
      if (fndecl != NULL_TREE
	  && (!DECL_IN_SYSTEM_HEADER (fndecl)
	      || global_dc->dc_warn_system_headers)
	  && warning (OPT_Wctad_maybe_unsupported,
		      "%qT may not intend to support class template argument "
		      "deduction", type))
	inform (input_location, "add a deduction guide to suppress this "
		"warning");
    }

  /* The deduced type is the return type of the selected guide, with the
     placeholder's cv-qualifiers applied.  */
  return cp_build_qualified_type (TREE_TYPE (call),
				  cp_type_quals (ptype));
}

/* Replace occurrences of 'auto' in TYPE with the appropriate type deduced
   from INIT.  AUTO_NODE is the TEMPLATE_TYPE_PARM used for 'auto' in TYPE.
   The CONTEXT determines the context in which auto deduction is performed
   and is used to control error diagnostics.  FLAGS are the LOOKUP_* flags.

   OUTER_TARGS are used during template argument deduction (context == adc_unify)
   to properly substitute the result, and is ignored in other contexts.

   For partial-concept-ids, extra args may be appended to the list of deduced
   template arguments prior to determining constraint satisfaction.  */

tree
do_auto_deduction (tree type, tree init, tree auto_node,
		   tsubst_flags_t complain, auto_deduction_context context,
		   tree outer_targs, int flags)
{
  tree targs;

  if (init == error_mark_node)
    return error_mark_node;

  if (init && type_dependent_expression_p (init)
      && context != adc_unify)
    /* Defining a subset of type-dependent expressions that we can deduce
       from ahead of time isn't worth the trouble.  */
    return type;

  /* Similarly, we can't deduce from another undeduced decl.  */
  if (init && undeduced_auto_decl (init))
    return type;

  /* We may be doing a partial substitution, but we still want to replace
     auto_node.  */
  complain &= ~tf_partial;

  if (tree tmpl = CLASS_PLACEHOLDER_TEMPLATE (auto_node))
    /* C++17 class template argument deduction.  */
    return do_class_deduction (type, tmpl, init, flags, complain);

  if (init == NULL_TREE || TREE_TYPE (init) == NULL_TREE)
    /* Nothing we can do with this, even in deduction context.
*/ return type; /* [dcl.spec.auto]: Obtain P from T by replacing the occurrences of auto with either a new invented type template parameter U or, if the initializer is a braced-init-list (8.5.4), with std::initializer_list<U>. */ if (BRACE_ENCLOSED_INITIALIZER_P (init)) { if (!DIRECT_LIST_INIT_P (init)) type = listify_autos (type, auto_node); else if (CONSTRUCTOR_NELTS (init) == 1) init = CONSTRUCTOR_ELT (init, 0)->value; else { if (complain & tf_warning_or_error) { if (permerror (input_location, "direct-list-initialization of " "%<auto%> requires exactly one element")) inform (input_location, "for deduction to %<std::initializer_list%>, use copy-" "list-initialization (i.e. add %<=%> before the %<{%>)"); } type = listify_autos (type, auto_node); } } if (type == error_mark_node) return error_mark_node; init = resolve_nondeduced_context (init, complain); if (context == adc_decomp_type && auto_node == type && init != error_mark_node && TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE) /* [dcl.decomp]/1 - if decomposition declaration has no ref-qualifiers and initializer has array type, deduce cv-qualified array type. 
*/ return cp_build_qualified_type_real (TREE_TYPE (init), TYPE_QUALS (type), complain); else if (AUTO_IS_DECLTYPE (auto_node)) { tree stripped_init = tree_strip_any_location_wrapper (init); bool id = (DECL_P (stripped_init) || ((TREE_CODE (init) == COMPONENT_REF || TREE_CODE (init) == SCOPE_REF) && !REF_PARENTHESIZED_P (init))); targs = make_tree_vec (1); TREE_VEC_ELT (targs, 0) = finish_decltype_type (init, id, tf_warning_or_error); if (type != auto_node) { if (complain & tf_error) error ("%qT as type rather than plain %<decltype(auto)%>", type); return error_mark_node; } else if (TYPE_QUALS (type) != TYPE_UNQUALIFIED) { if (complain & tf_error) error ("%<decltype(auto)%> cannot be cv-qualified"); return error_mark_node; } } else { if (error_operand_p (init)) return error_mark_node; tree parms = build_tree_list (NULL_TREE, type); tree tparms; if (flag_concepts) tparms = extract_autos (type); else { tparms = make_tree_vec (1); TREE_VEC_ELT (tparms, 0) = build_tree_list (NULL_TREE, TYPE_NAME (auto_node)); } targs = make_tree_vec (TREE_VEC_LENGTH (tparms)); int val = type_unification_real (tparms, targs, parms, &init, 1, 0, DEDUCE_CALL, NULL, /*explain_p=*/false); if (val > 0) { if (processing_template_decl) /* Try again at instantiation time. */ return type; if (type && type != error_mark_node && (complain & tf_error)) /* If type is error_mark_node a diagnostic must have been emitted by now. Also, having a mention to '<type error>' in the diagnostic is not really useful to the user. */ { if (cfun && FNDECL_USED_AUTO (current_function_decl) && (auto_node == DECL_SAVED_AUTO_RETURN_TYPE (current_function_decl)) && LAMBDA_FUNCTION_P (current_function_decl)) error ("unable to deduce lambda return type from %qE", init); else error ("unable to deduce %qT from %qE", type, init); type_unification_real (tparms, targs, parms, &init, 1, 0, DEDUCE_CALL, NULL, /*explain_p=*/true); } return error_mark_node; } } /* Check any placeholder constraints against the deduced type. 
*/ if (flag_concepts && !processing_template_decl) if (tree check = NON_ERROR (PLACEHOLDER_TYPE_CONSTRAINTS (auto_node))) { /* Use the deduced type to check the associated constraints. If we have a partial-concept-id, rebuild the argument list so that we check using the extra arguments. */ check = unpack_concept_check (check); gcc_assert (TREE_CODE (check) == TEMPLATE_ID_EXPR); tree cdecl = TREE_OPERAND (check, 0); if (OVL_P (cdecl)) cdecl = OVL_FIRST (cdecl); tree cargs = TREE_OPERAND (check, 1); if (TREE_VEC_LENGTH (cargs) > 1) { cargs = copy_node (cargs); TREE_VEC_ELT (cargs, 0) = TREE_VEC_ELT (targs, 0); } else cargs = targs; /* Rebuild the check using the deduced arguments. */ check = build_concept_check (cdecl, cargs, tf_none); if (!constraints_satisfied_p (check)) { if (complain & tf_warning_or_error) { auto_diagnostic_group d; switch (context) { case adc_unspecified: case adc_unify: error("placeholder constraints not satisfied"); break; case adc_variable_type: case adc_decomp_type: error ("deduced initializer does not satisfy " "placeholder constraints"); break; case adc_return_type: error ("deduced return type does not satisfy " "placeholder constraints"); break; case adc_requirement: error ("deduced expression type does not satisfy " "placeholder constraints"); break; } diagnose_constraints (input_location, check, targs); } return error_mark_node; } } if (processing_template_decl && context != adc_unify) outer_targs = current_template_args (); targs = add_to_template_args (outer_targs, targs); return tsubst (type, targs, complain, NULL_TREE); } /* Substitutes LATE_RETURN_TYPE for 'auto' in TYPE and returns the result. 
*/

tree
splice_late_return_type (tree type, tree late_return_type)
{
  if (late_return_type)
    {
      /* An explicit trailing return type simply replaces the placeholder;
	 this is only valid if TYPE really was 'auto' (or an error has
	 already been diagnosed).  */
      gcc_assert (is_auto (type) || seen_error ());
      return late_return_type;
    }

  if (tree *auto_node = find_type_usage (&type, is_auto))
    {
      tree idx = get_template_parm_index (*auto_node);
      if (TEMPLATE_PARM_LEVEL (idx) <= processing_template_decl)
	{
	  /* In an abbreviated function template we didn't know we were
	     dealing with a function template when we saw the auto return
	     type, so update it to have the correct level.  */
	  tree new_auto = make_auto_1 (TYPE_IDENTIFIER (*auto_node), false);
	  /* Carry over any constraints and qualifiers from the old
	     placeholder onto the re-leveled one.  */
	  PLACEHOLDER_TYPE_CONSTRAINTS (new_auto)
	    = PLACEHOLDER_TYPE_CONSTRAINTS (*auto_node);
	  TYPE_CANONICAL (new_auto) = canonical_type_parameter (new_auto);
	  new_auto = cp_build_qualified_type (new_auto,
					      TYPE_QUALS (*auto_node));
	  *auto_node = new_auto;
	}
    }
  return type;
}

/* Returns true iff TYPE is a TEMPLATE_TYPE_PARM representing 'auto' or
   'decltype(auto)' or a deduced class template.  */

bool
is_auto (const_tree type)
{
  if (TREE_CODE (type) == TEMPLATE_TYPE_PARM
      && (TYPE_IDENTIFIER (type) == auto_identifier
	  || TYPE_IDENTIFIER (type) == decltype_auto_identifier))
    return true;
  else
    return false;
}

/* for_each_template_parm callback for type_uses_auto.  */

int
is_auto_r (tree tp, void */*data*/)
{
  return is_auto (tp);
}

/* Returns the TEMPLATE_TYPE_PARM in TYPE representing `auto' iff TYPE contains
   a use of `auto'.  Returns NULL_TREE otherwise.  */

tree
type_uses_auto (tree type)
{
  if (type == NULL_TREE)
    return NULL_TREE;
  else if (flag_concepts)
    {
      /* The Concepts TS allows multiple autos in one type-specifier; just
	 return the first one we find, do_auto_deduction will collect all of
	 them.  */
      if (uses_template_parms (type))
	return for_each_template_parm (type, is_auto_r, /*data*/NULL,
				       /*visited*/NULL, /*nondeduced*/false);
      else
	return NULL_TREE;
    }
  else if (tree *tp = find_type_usage (&type, is_auto))
    return *tp;
  else
    return NULL_TREE;
}

/* Report ill-formed occurrences of auto types in ARGUMENTS.
   If concepts are enabled, auto is acceptable in template arguments, but
   only when TEMPL identifies a template class.  Return TRUE if any such
   errors were reported.  */

bool
check_auto_in_tmpl_args (tree tmpl, tree args)
{
  /* If there were previous errors, nevermind.  */
  if (!args || TREE_CODE (args) != TREE_VEC)
    return false;

  /* If TMPL is an identifier, we're parsing and we can't tell yet whether
     TMPL is supposed to be a type, a function or a variable.  We'll only be
     able to tell during template substitution, so we expect to be called
     again then.  If concepts are enabled and we know we have a type, we're
     ok.  */
  if (flag_concepts
      && (identifier_p (tmpl)
	  || (DECL_P (tmpl)
	      && (DECL_TYPE_TEMPLATE_P (tmpl)
		  || DECL_TEMPLATE_TEMPLATE_PARM_P (tmpl)))))
    return false;

  /* Quickly search for any occurrences of auto; usually there won't
     be any, and then we'll avoid allocating the vector.  */
  if (!type_uses_auto (args))
    return false;

  bool errors = false;

  /* Diagnose each occurrence of 'auto' that extract_autos found.  */
  tree vec = extract_autos (args);
  for (int i = 0; i < TREE_VEC_LENGTH (vec); i++)
    {
      tree xauto = TREE_VALUE (TREE_VEC_ELT (vec, i));
      error_at (DECL_SOURCE_LOCATION (xauto),
		"invalid use of %qT in template argument", xauto);
      errors = true;
    }

  return errors;
}

/* Recursively walk over && expressions searching for EXPR. Return a
   reference to that expression.  */

static tree *
find_template_requirement (tree *t, tree key)
{
  if (*t == key)
    return t;
  if (TREE_CODE (*t) == TRUTH_ANDIF_EXPR)
    {
      /* Template requirements are built as a fold of TRUTH_ANDIF_EXPRs;
	 search both conjuncts.  */
      if (tree *p = find_template_requirement (&TREE_OPERAND (*t, 0), key))
	return p;
      if (tree *p = find_template_requirement (&TREE_OPERAND (*t, 1), key))
	return p;
    }
  return 0;
}

/* Convert the generic type parameters in PARM that match the types given in
   the range [START_IDX, END_IDX) from the current_template_parms into generic
   type packs.
*/

tree
convert_generic_types_to_packs (tree parm, int start_idx, int end_idx)
{
  tree current = current_template_parms;
  int depth = TMPL_PARMS_DEPTH (current);
  current = INNERMOST_TEMPLATE_PARMS (current);
  tree replacement = make_tree_vec (TREE_VEC_LENGTH (current));

  /* Parameters before START_IDX are carried over unchanged.  */
  for (int i = 0; i < start_idx; ++i)
    TREE_VEC_ELT (replacement, i)
      = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i)));

  for (int i = start_idx; i < end_idx; ++i)
    {
      /* Create a distinct parameter pack type from the current parm and add
	 it to the replacement args to tsubst below into the generic function
	 parameter.  */
      tree node = TREE_VEC_ELT (current, i);
      tree o = TREE_TYPE (TREE_VALUE (node));
      tree t = copy_type (o);
      TEMPLATE_TYPE_PARM_INDEX (t)
	= reduce_template_parm_level (TEMPLATE_TYPE_PARM_INDEX (o),
				      t, 0, 0, tf_none);
      TREE_TYPE (TEMPLATE_TYPE_DECL (t)) = t;
      TYPE_STUB_DECL (t) = TYPE_NAME (t) = TEMPLATE_TYPE_DECL (t);
      TYPE_MAIN_VARIANT (t) = t;
      TEMPLATE_TYPE_PARAMETER_PACK (t) = true;
      TYPE_CANONICAL (t) = canonical_type_parameter (t);
      TREE_VEC_ELT (replacement, i) = t;

      /* Replace the current template parameter with new pack.  */
      TREE_VALUE (node) = TREE_CHAIN (t);

      /* Surgically adjust the associated constraint of adjusted parameter
	 and it's corresponding contribution to the current template
	 requirements.  */
      if (tree constr = TEMPLATE_PARM_CONSTRAINTS (node))
	{
	  tree id = unpack_concept_check (constr);
	  TREE_VEC_ELT (TREE_OPERAND (id, 1), 0) = t;
	  tree fold = finish_left_unary_fold_expr (constr, TRUTH_ANDIF_EXPR);
	  TEMPLATE_PARM_CONSTRAINTS (node) = fold;

	  /* If there was a constraint, we also need to replace that in
	     the template requirements, which we've already built.  */
	  tree *reqs = &TEMPLATE_PARMS_CONSTRAINTS (current_template_parms);
	  reqs = find_template_requirement (reqs, constr);
	  *reqs = fold;
	}
    }

  /* Parameters at END_IDX and beyond are likewise carried over unchanged.  */
  for (int i = end_idx, e = TREE_VEC_LENGTH (current); i < e; ++i)
    TREE_VEC_ELT (replacement, i)
      = TREE_TYPE (TREE_VALUE (TREE_VEC_ELT (current, i)));

  /* If there are more levels then build up the replacement with the outer
     template parms.  */
  if (depth > 1)
    replacement = add_to_template_args (template_parms_to_args
				       (TREE_CHAIN (current_template_parms)),
				       replacement);

  return tsubst (parm, replacement, tf_none, NULL_TREE);
}

/* __integer_pack(N) in a pack expansion expands to a sequence of numbers from
   0..N-1.  */

void
declare_integer_pack (void)
{
  tree ipfn = push_library_fn (get_identifier ("__integer_pack"),
			       build_function_type_list (integer_type_node,
							 integer_type_node,
							 NULL_TREE),
			       NULL_TREE, ECF_CONST);
  DECL_DECLARED_CONSTEXPR_P (ipfn) = true;
  set_decl_built_in_function (ipfn, BUILT_IN_FRONTEND,
			      CP_BUILT_IN_INTEGER_PACK);
}

/* Set up the hash tables for template instantiations.  */

void
init_template_processing (void)
{
  /* 37 is just a reasonable prime for the initial table size.  */
  decl_specializations = hash_table<spec_hasher>::create_ggc (37);
  type_specializations = hash_table<spec_hasher>::create_ggc (37);

  if (cxx_dialect >= cxx11)
    declare_integer_pack ();
}

/* Print stats about the template hash tables for -fstats.  */

void
print_template_statistics (void)
{
  fprintf (stderr, "decl_specializations: size %ld, %ld elements, "
	   "%f collisions\n", (long) decl_specializations->size (),
	   (long) decl_specializations->elements (),
	   decl_specializations->collisions ());
  fprintf (stderr, "type_specializations: size %ld, %ld elements, "
	   "%f collisions\n", (long) type_specializations->size (),
	   (long) type_specializations->elements (),
	   type_specializations->collisions ());
}

#if CHECKING_P

namespace selftest {

/* Verify that build_non_dependent_expr () works, for various expressions,
   and that location wrappers don't affect the results.
*/

static void
test_build_non_dependent_expr ()
{
  location_t loc = BUILTINS_LOCATION;

  /* Verify constants, without and with location wrappers.  */
  tree int_cst = build_int_cst (integer_type_node, 42);
  ASSERT_EQ (int_cst, build_non_dependent_expr (int_cst));

  tree wrapped_int_cst = maybe_wrap_with_location (int_cst, loc);
  ASSERT_TRUE (location_wrapper_p (wrapped_int_cst));
  ASSERT_EQ (wrapped_int_cst, build_non_dependent_expr (wrapped_int_cst));

  /* Likewise for a string literal.  */
  tree string_lit = build_string (4, "foo");
  TREE_TYPE (string_lit) = char_array_type_node;
  string_lit = fix_string_type (string_lit);
  ASSERT_EQ (string_lit, build_non_dependent_expr (string_lit));

  tree wrapped_string_lit = maybe_wrap_with_location (string_lit, loc);
  ASSERT_TRUE (location_wrapper_p (wrapped_string_lit));
  ASSERT_EQ (wrapped_string_lit,
	     build_non_dependent_expr (wrapped_string_lit));
}

/* Verify that type_dependent_expression_p () works correctly, even
   in the presence of location wrapper nodes.  */

static void
test_type_dependent_expression_p ()
{
  location_t loc = BUILTINS_LOCATION;

  tree name = get_identifier ("foo");

  /* If no templates are involved, nothing is type-dependent.  */
  gcc_assert (!processing_template_decl);
  ASSERT_FALSE (type_dependent_expression_p (name));

  ++processing_template_decl;

  /* Within a template, an unresolved name is always type-dependent.  */
  ASSERT_TRUE (type_dependent_expression_p (name));

  /* Ensure it copes with NULL_TREE and errors.  */
  ASSERT_FALSE (type_dependent_expression_p (NULL_TREE));
  ASSERT_FALSE (type_dependent_expression_p (error_mark_node));

  /* A USING_DECL in a template should be type-dependent, even if wrapped
     with a location wrapper (PR c++/83799).  */
  tree using_decl = build_lang_decl (USING_DECL, name, NULL_TREE);
  TREE_TYPE (using_decl) = integer_type_node;
  ASSERT_TRUE (type_dependent_expression_p (using_decl));
  tree wrapped_using_decl = maybe_wrap_with_location (using_decl, loc);
  ASSERT_TRUE (location_wrapper_p (wrapped_using_decl));
  ASSERT_TRUE (type_dependent_expression_p (wrapped_using_decl));

  --processing_template_decl;
}

/* Run all of the selftests within this file.  */

void
cp_pt_c_tests ()
{
  test_build_non_dependent_expr ();
  test_type_dependent_expression_p ();
}

} // namespace selftest

#endif /* #if CHECKING_P */

#include "gt-cp-pt.h"
GB_binop__ne_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ne_fp64 // A.*B function (eWiseMult): GB_AemultB__ne_fp64 // A*D function (colscale): GB_AxD__ne_fp64 // D*A function (rowscale): GB_DxB__ne_fp64 // C+=B function (dense accum): GB_Cdense_accumB__ne_fp64 // C+=b function (dense accum): GB_Cdense_accumb__ne_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_fp64 // C=scalar+B GB_bind1st__ne_fp64 // C=scalar+B' GB_bind1st_tran__ne_fp64 // C=A+scalar GB_bind2nd__ne_fp64 // C=A'+scalar GB_bind2nd_tran__ne_fp64 // C type: bool // A type: double // B,b type: double // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // 
cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x != y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_FP64 || GxB_NO_NE_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ne_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ne_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ne_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__ne_fp64 ( GrB_Matrix C, const 
GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__ne_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__ne_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ne_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ne_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ne_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB_bind1st_tran__ne_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB_bind2nd_tran__ne_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
error.h
#include <math.h> #include "disptools.h" /*! * \brief Redistribute the volume change over the image. * * Redistribute the change of volume within the body on the background, * and enforce the total volume change over the entire image to be zero. */ static inline void redistribute_volume_change( const Image J, const Mask mask ) { FLOATING total_change = 0.0; size_t background_voxels_count = 0; for (size_t z = 0; z < J.nz; ++z) { for (size_t y = 0; y < J.ny; ++y) { for (size_t x = 0; x < J.nx; ++x) { total_change += __(J, x, y, z) - 1.0; if (!__(mask, x, y, z)) { background_voxels_count += 1; } } } } const FLOATING correction = -total_change / background_voxels_count; for (size_t z = 0; z < J.nz; ++z) { for (size_t y = 0; y < J.ny; ++y) { for (size_t x = 0; x < J.nx; ++x) { if (!__(mask, x, y, z)) { __(J, x, y, z) = __(J, x, y, z) + correction; } } } } } /*! * \brief Compute a tolerance map over the volume. * * The tolerance is zero within the body volume and along the boundary, * while it is set to `tolerance' on the remaining background space. * Zero tolerance on the boundary prevents the vector field from * flowing outside the image space in the contours of the regions * where the body touches the boundary. * * \note: The resulting `Image' object must be deallocated with `destroy_image'. */ static inline Image create_tolerance_map( const Mask mask, const FLOATING tolerance ) { const size_t nx = mask.nx, ny = mask.ny, nz = mask.nz; Image tolerance_map = new_image(1, nx, ny, nz, 1.0, 1.0, 1.0); if (!tolerance_map.data) { return tolerance_map; } for (size_t z = 0; z < nz; ++z) { for (size_t y = 0; y < ny; ++y) { for (size_t x = 0; x < nx; ++x) { if (__(mask, x, y, z) || x == 0 || y == 0 || z == 0 || x == nx-1 || y == ny-1 || z == nz-1) { __(tolerance_map, x, y, z) = 0.0; } else { __(tolerance_map, x, y, z) = tolerance; } } } } return tolerance_map; } /*! * \brief Compute the error on the Jacobian of the moving field. 
 */
static inline FLOATING compute_error(
        const Image J,           /*!< Target Jacobian */
        const Image J_field,     /*!< Current Jacobian */
        const Mask mask,         /*!< Body mask */
        const FLOATING tolerance, /*!< Jacobian tolerance on background */
        const Image voxel_error, /*!< Error on the Jacobian */
        FLOATING *max_voxel_error /*!< Maximum voxel error */
        )
{
    // Cumulative error over the entire Jacobian map
    // (sum of squared per-voxel errors)
    FLOATING total_error = 0.0;

    // Local variable for maximum voxel error
    FLOATING max_error = 0.0;

    // Compute the error on the Jacobian map for all voxels
    //
    // Two OpenMP code paths: GCC supports `reduction(max:)` and
    // `collapse(3)`, while MSVC 15 (OpenMP 2.0) does not, so the max
    // reduction is emulated there with a per-thread accumulator merged
    // in a critical section.  MAX_ERROR_ACC names whichever accumulator
    // the active path updates.
#ifdef __GNUC__
    #define MAX_ERROR_ACC max_error
    #pragma omp parallel for \
                reduction(+: total_error) \
                reduction(max: max_error) \
                collapse(3) \
                schedule(static)
    for (size_t z = 0; z < J.nz; ++z) {
        for (size_t y = 0; y < J.ny; ++y) {
            for (size_t x = 0; x < J.nx; ++x) {
#else
    // MSVC 15 does not support OpenMP > 2.0
    #define MAX_ERROR_ACC local_max_error
    #pragma omp parallel
    {
        FLOATING local_max_error = 0.0;
        // NOTE(review): `z` is a signed int compared against the
        // (presumably unsigned) J.nz here, unlike the size_t used in the
        // GCC path — OpenMP 2.0 requires a signed loop index; overflow is
        // a risk only for images with more than INT_MAX slices.
        int z;
        #pragma omp for nowait \
                    reduction(+: total_error)
        for (z = 0; z < J.nz; ++z) {
            for (size_t y = 0; y < J.ny; ++y) {
                for (size_t x = 0; x < J.nx; ++x) {
#endif
                    // Compute the error on the voxel
                    FLOATING error = __(J_field, x, y, z) - __(J, x, y, z);

                    // Inside the body mask: the error counts in full
                    if (__(mask, x, y, z)) {
                        __(voxel_error, x, y, z) = error;
                    }
                    // Tolerate some error in the background: errors within
                    // `tolerance' are zeroed, larger ones are shrunk by
                    // `tolerance' while keeping their sign
                    else {
                        const FLOATING abs_error = fabs(error);
                        __(voxel_error, x, y, z) = abs_error < tolerance ?
                                                   0.0 :
                                                   copysign(abs_error - tolerance, error);
                    }

                    // Update total and maximum local errors
                    // The maximum voxel error is checked only within the
                    // body volume marked by the mask
                    total_error += __(voxel_error, x, y, z) * __(voxel_error, x, y, z);

                    if (__(mask, x, y, z) &&
                        fabs(__(voxel_error, x, y, z)) > MAX_ERROR_ACC) {
                        MAX_ERROR_ACC = fabs(__(voxel_error, x, y, z));
                    }
                }
            }
        }

#ifndef __GNUC__
        // Manual max reduction, MSVC 15 does not provide it
        #pragma omp critical
        {
            if (local_max_error > max_error) {
                max_error = local_max_error;
            }
        }
    } // pragma omp parallel
#endif
#undef MAX_ERROR_ACC

    // Return maximum voxel error
    *max_voxel_error = max_error;

    return total_error;
}
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
augmentation_operator.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file augmentation_operator.h * * \brief Contains implementation of sirius::Augmentation_operator class. 
*/
#ifndef __AUGMENTATION_OPERATOR_H__
#define __AUGMENTATION_OPERATOR_H__

#include "radial_integrals.h"

namespace sirius {

/// Augmentation charge operator for one atom type.
/** Holds the plane-wave coefficients Q(G) (packed over orbital pairs), the
 *  on-site matrix q_mtrx_ and the packed-index symmetry weights. */
class Augmentation_operator
{
  private:
    Simulation_context_base const& ctx_;

    Communicator const& comm_;

    Atom_type const& atom_type_;

    mdarray<double, 2> q_mtrx_;

    /* Q(G) for the locally stored G-vectors; the second dimension
       interleaves real and imaginary parts (2*igloc, 2*igloc + 1). */
    mdarray<double, 2> q_pw_;

    mdarray<double, 1> sym_weight_;

    /* Compute Q(G) for the local fraction of G-vectors. */
    void generate_pw_coeffs(double omega__, Gvec const& gvec__, Radial_integrals_aug<false> const& radial_integrals__)
    {
        PROFILE("sirius::Augmentation_operator::generate_pw_coeffs");

        double fourpi_omega = fourpi / omega__;

        /* maximum l of beta-projectors */
        int lmax_beta = atom_type_.indexr().lmax();
        int lmmax = Utils::lmmax(2 * lmax_beta);
        auto l_by_lm = Utils::l_by_lm(2 * lmax_beta);

        /* i^l prefactor for each lm channel */
        std::vector<double_complex> zilm(lmmax);
        for (int l = 0, lm = 0; l <= 2 * lmax_beta; l++) {
            for (int m = -l; m <= l; m++, lm++) {
                zilm[lm] = std::pow(double_complex(0, 1), l);
            }
        }

        /* Gaunt coefficients of three real spherical harmonics */
        Gaunt_coefficients<double> gaunt_coefs(lmax_beta, 2 * lmax_beta, lmax_beta, SHT::gaunt_rlm);

        /* split G-vectors between ranks */
        int gvec_count = gvec__.count();
        int gvec_offset = gvec__.offset();

        /* array of real spherical harmonics for each G-vector */
        mdarray<double, 2> gvec_rlm(Utils::lmmax(2 * lmax_beta), gvec_count);
        #pragma omp parallel for schedule(static)
        for (int igloc = 0; igloc < gvec_count; igloc++) {
            int ig = gvec_offset + igloc;
            auto rtp = SHT::spherical_coordinates(gvec__.gvec_cart(ig));
            SHT::spherical_harmonics(2 * lmax_beta, rtp[1], rtp[2], &gvec_rlm(0, igloc));
        }

        /* number of beta-projectors */
        int nbf = atom_type_.mt_basis_size();

        /* array of plane-wave coefficients */
        q_pw_ = mdarray<double, 2>(nbf * (nbf + 1) / 2, 2 * gvec_count, memory_t::host_pinned, "q_pw_");
        #pragma omp parallel for schedule(static)
        for (int igloc = 0; igloc < gvec_count; igloc++) {
            int ig = gvec_offset + igloc;
            double g = gvec__.gvec_len(ig);

            std::vector<double_complex> v(lmmax);

            auto ri = radial_integrals__.values(atom_type_.id(), g);

            for (int xi2 = 0; xi2 < nbf; xi2++) {
                int lm2 = atom_type_.indexb(xi2).lm;
                int idxrf2 = atom_type_.indexb(xi2).idxrf;

                for (int xi1 = 0; xi1 <= xi2; xi1++) {
                    int lm1 = atom_type_.indexb(xi1).lm;
                    int idxrf1 = atom_type_.indexb(xi1).idxrf;

                    /* packed orbital index */
                    int idx12 = Utils::packed_index(xi1, xi2); //xi2 * (xi2 + 1) / 2 + xi1;
                    /* packed radial-function index */
                    int idxrf12 = Utils::packed_index(idxrf1, idxrf2); //idxrf2 * (idxrf2 + 1) / 2 + idxrf1;

                    /* contract radial integrals with Rlm(G) over lm3 */
                    for (int lm3 = 0; lm3 < lmmax; lm3++) {
                        v[lm3] = std::conj(zilm[lm3]) * gvec_rlm(lm3, igloc) * ri(idxrf12, l_by_lm[lm3]);
                    }

                    double_complex z = fourpi_omega * gaunt_coefs.sum_L3_gaunt(lm2, lm1, &v[0]);
                    q_pw_(idx12, 2 * igloc) = z.real();
                    q_pw_(idx12, 2 * igloc + 1) = z.imag();
                }
            }
        }

        /* weight of each packed pair: 2 for off-diagonal, 1 for diagonal */
        sym_weight_ = mdarray<double, 1>(nbf * (nbf + 1) / 2, memory_t::host_pinned, "sym_weight_");
        for (int xi2 = 0; xi2 < nbf; xi2++) {
            for (int xi1 = 0; xi1 <= xi2; xi1++) {
                /* packed orbital index */
                int idx12 = Utils::packed_index(xi1, xi2); //xi2 * (xi2 + 1) / 2 + xi1;
                sym_weight_(idx12) = (xi1 == xi2) ? 1 : 2;
            }
        }

        q_mtrx_ = mdarray<double, 2>(nbf, nbf);
        q_mtrx_.zero();

        /* q_mtrx_ is the G = 0 coefficient scaled by the cell volume;
           computed on rank 0 only, then broadcast */
        if (comm_.rank() == 0) {
            for (int xi2 = 0; xi2 < nbf; xi2++) {
                for (int xi1 = 0; xi1 <= xi2; xi1++) {
                    /* packed orbital index */
                    int idx12 = Utils::packed_index(xi1, xi2); //xi2 * (xi2 + 1) / 2 + xi1;
                    q_mtrx_(xi1, xi2) = q_mtrx_(xi2, xi1) = omega__ * q_pw_(idx12, 0);
                }
            }
        }
        /* broadcast from rank#0 */
        comm_.bcast(&q_mtrx_(0, 0), nbf * nbf , 0);

        if (ctx_.control().print_checksum_) {
            double cs = q_pw_.checksum();
            comm_.allreduce(&cs, 1);
            if (comm_.rank() == 0) {
                print_checksum("q_pw", cs);
            }
        }
    }

  public:
    /// Construct the operator for atom type \p iat__.
    /** Coefficients are generated only when the type carries augmentation
     *  charge. */
    Augmentation_operator(Simulation_context_base const& ctx__, int iat__)
        : ctx_(ctx__)
        , comm_(ctx__.comm())
        , atom_type_(ctx__.unit_cell().atom_type(iat__))
    {
        if (atom_type_.augment()) {
            generate_pw_coeffs(ctx__.unit_cell().omega(), ctx__.gvec(), ctx__.aug_ri());
        }
    }

    /// Asynchronously stage q_pw_ and sym_weight_ on the device.
    void prepare(int stream_id__)
    {
        if (atom_type_.parameters().processing_unit() == GPU && atom_type_.augment()) {
            sym_weight_.allocate(memory_t::device);
            sym_weight_.async_copy<memory_t::host, memory_t::device>(stream_id__);

            q_pw_.allocate(memory_t::device);
            q_pw_.async_copy<memory_t::host, memory_t::device>(stream_id__);
        }
    }

    /// Release the device copies created by prepare().
    void dismiss()
    {
        if (atom_type_.parameters().processing_unit() == GPU && atom_type_.augment()) {
            q_pw_.deallocate(memory_t::device);
            sym_weight_.deallocate(memory_t::device);
        }
    }

    mdarray<double, 2> const& q_pw() const
    {
        return q_pw_;
    }

    /// Single packed Q(G) entry; ig__ indexes the interleaved re/im layout.
    double q_pw(int i__, int ig__) const
    {
        return q_pw_(i__, ig__);
    }

    double const& q_mtrx(int xi1__, int xi2__) const
    {
        return q_mtrx_(xi1__, xi2__);
    }

    double& q_mtrx(int xi1__, int xi2__)
    {
        return q_mtrx_(xi1__, xi2__);
    }

    inline mdarray<double, 1> const& sym_weight() const
    {
        return sym_weight_;
    }

    /// Weight of Q_{\xi,\xi'}.
    /** 2 if off-diagonal (xi != xi'), 1 if diagonal (xi=xi') */
    inline double sym_weight(int idx__) const
    {
        return sym_weight_(idx__);
    }
};

/// Derivative of the augmentation operator coefficients with respect to one
/// Cartesian component of G (component nu).
class Augmentation_operator_gvec_deriv
{
  private:
    Simulation_context_base const& ctx_;

    Communicator const& comm_;

    mdarray<double, 2> q_pw_;

    mdarray<double, 1> sym_weight_;

    /* Rlm(G) for each local G-vector */
    mdarray<double, 2> rlm_g_;

    /* dRlm(G)/dG_nu for nu = x, y, z */
    mdarray<double, 3> rlm_dg_;

    std::unique_ptr<Gaunt_coefficients<double>> gaunt_coefs_;

  public:
    /// Precompute Rlm(G) and its Cartesian derivatives for all local G-vectors.
    Augmentation_operator_gvec_deriv(Simulation_context_base const& ctx__)
        : ctx_(ctx__)
        , comm_(ctx__.comm())
    {
        PROFILE("sirius::Augmentation_operator_gvec_deriv|constructor");

        int lmax = ctx__.unit_cell().lmax();
        int lmmax = Utils::lmmax(2 * lmax);

        /* Gaunt coefficients of three real spherical harmonics */
        gaunt_coefs_ = std::unique_ptr<Gaunt_coefficients<double>>(new Gaunt_coefficients<double>(lmax, 2 * lmax, lmax, SHT::gaunt_rlm));

        /* split G-vectors between ranks */
        int gvec_count = ctx_.gvec().count();
        int gvec_offset = ctx_.gvec().offset();

        rlm_g_ = mdarray<double, 2>(lmmax, gvec_count);
        rlm_dg_ = mdarray<double, 3>(lmmax, 3, gvec_count);

        /* array of real spherical harmonics and derivatives for each G-vector */
        #pragma omp parallel for schedule(static)
        for (int igloc = 0; igloc < gvec_count; igloc++) {
            int ig = gvec_offset + igloc;
            auto rtp = SHT::spherical_coordinates(ctx_.gvec().gvec_cart(ig));

            double theta = rtp[1];
            double phi = rtp[2];

            /* chain rule: derivatives of (theta, phi) w.r.t. Cartesian G */
            vector3d<double> dtheta_dq({std::cos(phi) * std::cos(theta), std::cos(theta) * std::sin(phi), -std::sin(theta)});
            vector3d<double> dphi_dq({-std::sin(phi), std::cos(phi), 0.0});

            SHT::spherical_harmonics(2 * lmax, theta, phi, &rlm_g_(0, igloc));
            mdarray<double, 1> dRlm_dtheta(lmmax);
            mdarray<double, 1> dRlm_dphi_sin_theta(lmmax);
            SHT::dRlm_dtheta(2 * lmax, theta, phi, dRlm_dtheta);
            SHT::dRlm_dphi_sin_theta(2 * lmax, theta, phi, dRlm_dphi_sin_theta);
            for (int nu = 0; nu < 3; nu++) {
                for (int lm = 0; lm < lmmax; lm++) {
                    rlm_dg_(lm, nu, igloc) = dRlm_dtheta[lm] * dtheta_dq[nu] + dRlm_dphi_sin_theta[lm] * dphi_dq[nu];
                }
            }
        }
    }

    /// Generate dQ(G)/dG_nu for atom type \p iat__ and component \p nu__.
    void generate_pw_coeffs(int iat__,
                            Radial_integrals_aug<false> const& ri__,
                            Radial_integrals_aug<true> const& ri_dq__,
                            int nu__)
    {
        PROFILE("sirius::Augmentation_operator_gvec_deriv::generate_pw_coeffs");

        auto& atom_type = ctx_.unit_cell().atom_type(iat__);

        /* maximum l of beta-projectors */
        int lmax_beta = atom_type.indexr().lmax();
        int lmmax = Utils::lmmax(2 * lmax_beta);
        auto l_by_lm = Utils::l_by_lm(2 * lmax_beta);

        /* i^l prefactor for each lm channel */
        std::vector<double_complex> zilm(lmmax);
        for (int l = 0, lm = 0; l <= 2 * lmax_beta; l++) {
            for (int m = -l; m <= l; m++, lm++) {
                zilm[lm] = std::pow(double_complex(0, 1), l);
            }
        }

        /* split G-vectors between ranks */
        int gvec_count = ctx_.gvec().count();
        int gvec_offset = ctx_.gvec().offset();

        /* number of beta-projectors */
        int nbf = atom_type.mt_basis_size();

        /* array of plane-wave coefficients */
        q_pw_ = mdarray<double, 2>(nbf * (nbf + 1) / 2, 2 * gvec_count, memory_t::host_pinned, "q_pw_dg_");

        sddk::timer t2("sirius::Augmentation_operator_gvec_deriv::generate_pw_coeffs|qpw");
        #pragma omp parallel for schedule(static)
        for (int igloc = 0; igloc < gvec_count; igloc++) {
            int ig = gvec_offset + igloc;
            double g = ctx_.gvec().gvec_len(ig);
            auto gvc = ctx_.gvec().gvec_cart(ig);

            std::vector<double_complex> v(lmmax);

            auto ri = ri__.values(atom_type.id(), g);
            auto ri_dg = ri_dq__.values(atom_type.id(), g);

            for (int xi2 = 0; xi2 < nbf; xi2++) {
                int lm2 = atom_type.indexb(xi2).lm;
                int idxrf2 = atom_type.indexb(xi2).idxrf;

                for (int xi1 = 0; xi1 <= xi2; xi1++) {
                    int lm1 = atom_type.indexb(xi1).lm;
                    int idxrf1 = atom_type.indexb(xi1).idxrf;

                    /* packed orbital index */
                    int idx12 = xi2 * (xi2 + 1) / 2 + xi1;
                    /* packed radial-function index */
                    int idxrf12 = idxrf2 * (idxrf2 + 1) / 2 + idxrf1;

                    /* product rule: d(Rlm * ri)/dG_nu */
                    for (int lm3 = 0; lm3 < lmmax; lm3++) {
                        v[lm3] = std::conj(zilm[lm3]) * (rlm_dg_(lm3, nu__, igloc) * ri(idxrf12, l_by_lm[lm3]) +
                                                         rlm_g_(lm3, igloc) * ri_dg(idxrf12, l_by_lm[lm3]) * gvc[nu__]);
                    }

                    double_complex z = fourpi * gaunt_coefs_->sum_L3_gaunt(lm2, lm1, &v[0]);
                    q_pw_(idx12, 2 * igloc) = z.real();
                    q_pw_(idx12, 2 * igloc + 1) = z.imag();
                }
            }
        }
        t2.stop();

        /* weight of each packed pair: 2 for off-diagonal, 1 for diagonal */
        sym_weight_ = mdarray<double, 1>(nbf * (nbf + 1) / 2, memory_t::host_pinned, "sym_weight_");
        for (int xi2 = 0; xi2 < nbf; xi2++) {
            for (int xi1 = 0; xi1 <= xi2; xi1++) {
                /* packed orbital index */
                int idx12 = xi2 * (xi2 + 1) / 2 + xi1;
                sym_weight_(idx12) = (xi1 == xi2) ? 1 : 2;
            }
        }
    }

    //void prepare(int stream_id__) const
    //{
    //    #ifdef __GPU
    //    if (atom_type_.parameters().processing_unit() == GPU && atom_type_.pp_desc().augment) {
    //        sym_weight_.allocate(memory_t::device);
    //        sym_weight_.async_copy_to_device(stream_id__);
    //        q_pw_.allocate(memory_t::device);
    //        q_pw_.async_copy_to_device(stream_id__);
    //    }
    //    #endif
    //}

    //void dismiss() const
    //{
    //    #ifdef __GPU
    //    if (atom_type_.parameters().processing_unit() == GPU && atom_type_.pp_desc().augment) {
    //        q_pw_.deallocate_on_device();
    //        sym_weight_.deallocate_on_device();
    //    }
    //    #endif
    //}

    //mdarray<double, 2> const& q_pw() const
    //{
    //    return q_pw_;
    //}

    mdarray<double, 2> const& q_pw() const
    {
        return q_pw_;
    }

    /// Single packed entry; ig__ indexes the interleaved re/im layout.
    double q_pw(int i__, int ig__) const
    {
        return q_pw_(i__, ig__);
    }

    //inline mdarray<double, 1> const& sym_weight() const
    //{
    //    return sym_weight_;
    //}

    // /// Weight of Q_{\xi,\xi'}.
    /** 2 if off-diagonal (xi != xi'), 1 if diagonal (xi=xi') */
    inline double sym_weight(int idx__) const
    {
        return sym_weight_(idx__);
    }
};

} // namespace sirius

#endif // __AUGMENTATION_OPERATOR_H__
core_math.h
// == mojo ==================================================================== // // Copyright (c) gnawice@gnawice.com. All rights reserved. // See LICENSE in root folder // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files(the "Software"), // to deal in the Software without restriction, including without // limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, and/or sell copies of the Software, and to permit persons to // whom the Software is furnished to do so, subject to the following // conditions : // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. // IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT // OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR // THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
//
// ============================================================================
// core_math.h: defines matrix class and math functions
// ==================================================================== mojo ==

#pragma once

#include <math.h>
#include <string.h>
#include <string>
#include <cstdlib>
#include <random>
#include <algorithm>
// NOTE(review): <vector> is used by matrix::pad() and, under MOJO_AVX,
// <immintrin.h> is needed for the __m256 intrinsics; both appear to rely on
// transitive includes from the including translation unit -- confirm.

namespace mojo {

// Padding policies used by matrix::pad().
enum pad_type { zero = 0, edge = 1, median_edge = 2 };

// Dot product of two float vectors; small sizes are hand-unrolled.
inline float dot(const float *x1, const float *x2, const int size)
{
    switch (size)
    {
    case 1: return x1[0] * x2[0];
    case 2: return x1[0] * x2[0] + x1[1] * x2[1];
    case 3: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2];
    case 4: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3];
    case 5: return x1[0] * x2[0] + x1[1] * x2[1] + x1[2] * x2[2] + x1[3] * x2[3] + x1[4] * x2[4];
    default:
        float v = 0;
        for (int i = 0; i<size; i++) v += x1[i] * x2[i];
        return v;
    };
}

// Dot product of two size-by-size 2-D patches stored with given row strides.
inline float unwrap_2d_dot(const float *x1, const float *x2, const int size, int stride1, int stride2)
{
    float v=0;
    for(int j=0; j<size; j++)
        v+= dot(&x1[stride1*j],&x2[stride2*j],size);
    return v;
}

// second item is rotated 180 (this is a convolution)
inline float dot_rot180(const float *x1, const float *x2, const int size)
{
    switch(size)
    {
    case 1: return x1[0]*x2[0];
    case 2: return x1[0]*x2[1]+x1[1]*x2[0];
    case 3: return x1[0]*x2[2]+x1[1]*x2[1]+x1[2]*x2[0];
    case 4: return x1[0]*x2[3]+x1[1]*x2[2]+x1[2]*x2[1]+x1[3]*x2[0];
    case 5: return x1[0]*x2[4]+x1[1]*x2[3]+x1[2]*x2[2]+x1[3]*x2[1]+x1[4]*x2[0];
    default:
        float v=0;
        for(int i=0; i<size; i++) v+=x1[i]*x2[size-i-1];
        return v;
    };
}

// 2-D patch convolution: the second patch is rotated 180 degrees.
inline float unwrap_2d_dot_rot180(const float *x1, const float *x2, const int size, int stride1, int stride2)
{
    float v=0;
    for(int j=0; j<size; j++)
    {
        v+= dot_rot180(&x1[stride1*j],&x2[stride2*(size-j-1)],size);
    }
    return v;
}

// Unrolls NxN input patches into an 8-wide interleaved layout so that the
// dotsum_unwrapped_* kernels can process 8 output pixels per pass.
// N==3 and N==5 have hand-unrolled fast paths.
inline void unwrap_aligned_NxN(const int N, float *aligned_out, const float *in, const int in_size, const int stride = 1)
{
    const int node_size = (in_size - N)/stride + 1;
    int c1 = 0;
    int off = 0;
    const int inc_off = N*N*8;

    for (int j = 0; j < node_size; j += 1) // input h
    {
        for (int i = 0; i < node_size; i += 1) // input w
        {
            const float *tn = in + j*in_size + i;
            if(N==5)
            {
                for (int k = 0; k < 5; k++)
                {
                    aligned_out[c1 + 0 + k * 40 + off] = tn[0 + 0 + in_size*k];
                    aligned_out[c1 + 8 + k * 40 + off] = tn[0 + 1 + in_size*k];
                    aligned_out[c1 + 16 + k * 40 + off] = tn[0 + 2 + in_size*k];
                    aligned_out[c1 + 24 + k * 40 + off] = tn[0 + 3 + in_size*k];
                    aligned_out[c1 + 32 + k * 40 + off] = tn[0 + 4 + in_size*k];
                }
            }
            else if(N==3)
            {
                aligned_out[c1 + off] = tn[0];
                aligned_out[c1 + 8 + off] = tn[0 + 1];
                aligned_out[c1 + 16 + off] = tn[0 + 2];
                aligned_out[c1 + 24 + off] = tn[0 + in_size];
                aligned_out[c1 + 32 + off] = tn[0 + 1 + in_size];
                aligned_out[c1 + 40 + off] = tn[0 + 2 + in_size];
                aligned_out[c1 + 48 + off] = tn[0 + 2 * in_size];
                aligned_out[c1 + 56 + off] = tn[0 + 1 + 2 * in_size];
                aligned_out[c1 + 64 + off] = tn[0 + 2 + 2 * in_size];
            }
            else
            {
                int cnt=0;
                for (int k = 0; k < N; k++)
                {
                    for (int m = 0; m < N; m++)
                    {
                        aligned_out[c1 + cnt*8 + off] = tn[0 + m + in_size*k];
                        cnt++;
                    }
                }
            }
            off++;
            // 8 pixels fill one interleaved group; then jump to the next group
            if (off > 7) { off = 0; c1 += inc_off; }
        }
    }
}

// Generic (scalar) accumulating NxN filter over unwrapped input; processes
// 8 output pixels at a time to match unwrap_aligned_NxN's layout.
// NOTE(review): assumes outsize is a multiple of 8 -- confirm at call sites.
inline void dotsum_unwrapped_NxN(const int N, const float *im, const float *filter_ptr, float *out, const int outsize)
{
    const int NN=N*N;
    for (int j = 0; j < outsize; j += 8)
    {
        float *c = out+j;
        for(int i=0; i<NN; i++)
        {
            const float f = filter_ptr[i];
            c[0]+=im[0]*f; c[1]+=im[1]*f; c[2]+=im[2]*f; c[3]+=im[3]*f;
            c[4]+=im[4]*f; c[5]+=im[5]*f; c[6]+=im[6]*f; c[7]+=im[7]*f;
            im+=8;
        }
    }
}

#ifdef MOJO_AVX
// AVX 2x2 accumulating filter kernel (8 output pixels per iteration).
// NOTE(review): _img and out must be 32-byte aligned (_mm256_load_ps /
// _mm256_stream_ps) -- guaranteed by matrix::new_x, confirm for other buffers.
inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    _mm256_zeroupper();
    const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
    const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
    const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
    const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);

    for (int j = 0; j < outsize; j += 8)
    {
        __m256 a, c0, c1;
        // multiply filter
        a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
        a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
        // add result to output
        a = _mm256_load_ps(out + j);
        c0 = _mm256_add_ps(c0, a);
        _mm256_stream_ps(out + j, c0);
        _img += 32;
    }
    _mm256_zeroupper();
}

// AVX 3x3 accumulating filter kernel.
inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    _mm256_zeroupper();
    const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
    const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
    const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
    const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
    const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]);
    const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
    const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]);
    const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
    const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]);

    for (int j = 0; j < outsize; j += 8)//stride) // input w
    {
        __m256 a, c0, c1;
        // multiply filter
        a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
        a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
        // add result to output
        a = _mm256_load_ps(out + j);
        c0 = _mm256_add_ps(c0, a);
        _mm256_stream_ps(out + j, c0);
        _img += 72;
    }
    _mm256_zeroupper();
}

// AVX 4x4 accumulating filter kernel.
inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    _mm256_zeroupper();
    const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
    const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
    const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
    const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
    const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]);
    const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
    const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]);
    const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
    const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]);
    const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]);
    const __m256 f10 = _mm256_broadcast_ss(&filter_ptr[10]);
    const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]);
    const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]);
    const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]);
    const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]);
    const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]);

    for (int j = 0; j < outsize; j += 8)//stride) // input w
    {
        __m256 a, c0, c1;
        // multiply filter
        a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
        a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 72); c1 = _mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1);
        // add result to output
        a = _mm256_load_ps(out + j);
        c0 = _mm256_add_ps(c0, a);
        _mm256_stream_ps(out + j, c0);
        _img += 128;
    }
    _mm256_zeroupper();
}

// AVX 5x5 accumulating filter kernel.
inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    _mm256_zeroupper();
    const __m256 f0 = _mm256_broadcast_ss(&filter_ptr[0]);
    const __m256 f1 = _mm256_broadcast_ss(&filter_ptr[1]);
    const __m256 f2 = _mm256_broadcast_ss(&filter_ptr[2]);
    const __m256 f3 = _mm256_broadcast_ss(&filter_ptr[3]);
    const __m256 f4 = _mm256_broadcast_ss(&filter_ptr[4]);
    const __m256 f5 = _mm256_broadcast_ss(&filter_ptr[5]);
    const __m256 f6 = _mm256_broadcast_ss(&filter_ptr[6]);
    const __m256 f7 = _mm256_broadcast_ss(&filter_ptr[7]);
    const __m256 f8 = _mm256_broadcast_ss(&filter_ptr[8]);
    const __m256 f9 = _mm256_broadcast_ss(&filter_ptr[9]);
    const __m256 f10 = _mm256_broadcast_ss(&filter_ptr[10]);
    const __m256 f11 = _mm256_broadcast_ss(&filter_ptr[11]);
    const __m256 f12 = _mm256_broadcast_ss(&filter_ptr[12]);
    const __m256 f13 = _mm256_broadcast_ss(&filter_ptr[13]);
    const __m256 f14 = _mm256_broadcast_ss(&filter_ptr[14]);
    const __m256 f15 = _mm256_broadcast_ss(&filter_ptr[15]);
    const __m256 f16 = _mm256_broadcast_ss(&filter_ptr[16]);
    const __m256 f17 = _mm256_broadcast_ss(&filter_ptr[17]);
    const __m256 f18 = _mm256_broadcast_ss(&filter_ptr[18]);
    const __m256 f19 = _mm256_broadcast_ss(&filter_ptr[19]);
    const __m256 f20 = _mm256_broadcast_ss(&filter_ptr[20]);
    const __m256 f21 = _mm256_broadcast_ss(&filter_ptr[21]);
    const __m256 f22 = _mm256_broadcast_ss(&filter_ptr[22]);
    const __m256 f23 = _mm256_broadcast_ss(&filter_ptr[23]);
    const __m256 f24 = _mm256_broadcast_ss(&filter_ptr[24]);

    for (int j = 0; j < outsize; j += 8)
    {
        __m256 a, c0, c1;
        a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f0);
        a = _mm256_load_ps(_img + 8); c1 = _mm256_mul_ps(a, f1); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 16); c1 = _mm256_mul_ps(a, f2); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 24); c1 = _mm256_mul_ps(a, f3); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 32); c1 = _mm256_mul_ps(a, f4); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 40); c1 = _mm256_mul_ps(a, f5); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 48); c1 = _mm256_mul_ps(a, f6); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 56); c1 = _mm256_mul_ps(a, f7); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 64); c1 = _mm256_mul_ps(a, f8); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 72); c1 = _mm256_mul_ps(a, f9); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 80); c1 = _mm256_mul_ps(a, f10); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 88); c1 = _mm256_mul_ps(a, f11); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 96); c1 = _mm256_mul_ps(a, f12); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 104); c1 = _mm256_mul_ps(a, f13); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 112); c1 = _mm256_mul_ps(a, f14); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 120); c1 = _mm256_mul_ps(a, f15); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 128); c1 = _mm256_mul_ps(a, f16); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 136); c1 = _mm256_mul_ps(a, f17); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 144); c1 = _mm256_mul_ps(a, f18); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 152); c1 = _mm256_mul_ps(a, f19); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 160); c1 = _mm256_mul_ps(a, f20); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 168); c1 = _mm256_mul_ps(a, f21); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 176); c1 = _mm256_mul_ps(a, f22); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 184); c1 = _mm256_mul_ps(a, f23); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(_img + 192); c1 = _mm256_mul_ps(a, f24); c0 = _mm256_add_ps(c0, c1);
        a = _mm256_load_ps(out + j);
        c0 = _mm256_add_ps(c0, a);
        _mm256_stream_ps(out + j, c0);
        _img += 200;
    }
    _mm256_zeroupper();
}

// AVX 7x7 accumulating filter kernel (filter broadcast into a register array).
inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    _mm256_zeroupper();
    __m256 f[49];//=new __m256(s);
    for(int i=0; i<49; i++) f[i]= _mm256_broadcast_ss(&filter_ptr[i]);

    for (int j = 0; j < outsize; j += 8)
    {
        __m256 a, c0, c1;
        a = _mm256_load_ps(_img); c0 = _mm256_mul_ps(a, f[0]);
        for(int i=1; i<49;i++)
        {
            a = _mm256_load_ps(_img + 8*i); c1 = _mm256_mul_ps(a, f[i]); c0 = _mm256_add_ps(c0, c1);
        }
        a = _mm256_load_ps(out + j);
        c0 = _mm256_add_ps(c0, a);
        _mm256_stream_ps(out + j, c0);
        _img += 49*8;
    }
    _mm256_zeroupper();
    //delete [] f;
}

#else // no AVX -- scalar fallbacks delegating to the generic NxN kernel

inline void dotsum_unwrapped_2x2(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    dotsum_unwrapped_NxN(2, _img, filter_ptr, out, outsize);
}
inline void dotsum_unwrapped_3x3(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    dotsum_unwrapped_NxN(3, _img, filter_ptr, out, outsize);
}
inline void dotsum_unwrapped_4x4(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    dotsum_unwrapped_NxN(4, _img, filter_ptr, out, outsize);
}
inline void dotsum_unwrapped_5x5(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    dotsum_unwrapped_NxN(5, _img, filter_ptr, out, outsize);
}
inline void dotsum_unwrapped_7x7(const float *_img, const float *filter_ptr, float *out, const int outsize)
{
    dotsum_unwrapped_NxN(7, _img, filter_ptr, out, outsize);
}

#endif

// matrix class ---------------------------------------------------
// should use opencv if available
//
// Channel-major float image/tensor with 32-byte-aligned storage so the AVX
// kernels above can use aligned loads/stores.
// NOTE(review): the class has a virtual method (calc_chan_stride) but no
// virtual destructor; deleting a derived object through a matrix* would be
// undefined behavior -- confirm no such use exists.
class matrix
{
    int _size;       // number of usable floats (chan_stride * chans)
    int _capacity;   // allocated usable floats (may exceed _size after resize)
    float *_x_mem;   // raw allocation; x points into it at a 32-byte boundary
    void delete_x() { delete[] _x_mem; x = NULL; _x_mem = NULL; }
    // 4 extra for alignment and 4 for 3 padding for SSE
    //float *new_x(const int size) { _x_mem = new float[size + 4+3]; x = (float *)(((uintptr_t)_x_mem + 16) & ~(uintptr_t)0x0F); return x; }
    // avx mem aligment
    float *new_x(const int size) { _x_mem = new float[size + 8 + 7]; x = (float *)(((uintptr_t)_x_mem + 32) & ~(uintptr_t)0x1F); return x; }
public:
    std::string _name;
    int cols, rows, chans;
    int chan_stride;   // floats between consecutive channels (>= cols*rows)
    int chan_aligned;  // nonzero: round chan_stride up to a multiple of 8
    float *x;          // 32-byte-aligned data pointer

    // size must be divisible by 8 for AVX
    virtual int calc_chan_stride(int w, int h)
    {
        if (chan_aligned)
        {
            int s = w*h;
            const int remainder = s % 8;
            if (remainder > 0) s += 8 - remainder;
            return s;
        }
        else return w*h;
    }

    matrix( ): cols(0), rows(0), chans(0), _size(0), _capacity(0), chan_stride(0), x(NULL), chan_aligned(0)/*, empty_chan(NULL)*/{}

    // Sized constructor; optionally copies `data` (must hold _size floats).
    matrix( int _w, int _h, int _c=1, const float *data=NULL, int align_chan=0): cols(_w), rows(_h), chans(_c)
    {
        chan_aligned = align_chan;
        chan_stride = calc_chan_stride(cols, rows);
        _size= chan_stride*chans;
        _capacity=_size;
        x = new_x(_size);
        if(data!=NULL) memcpy(x,data,_size*sizeof(float));
    }

    // copy constructor - deep copy
    matrix( const matrix &m) : cols(m.cols), rows(m.rows), chan_aligned(m.chan_aligned), chans(m.chans), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size)
    {
        x = new_x(_size);
        memcpy(x,m.x,sizeof(float)*_size);
        /*empty_chan = new unsigned char[chans]; memcpy(empty_chan, m.empty_chan, chans);*/
    }
    // { v=m.v; x=(float*)v.data();}

    // copy and pad constructor
    matrix( const matrix &m, int pad_cols, int pad_rows, mojo::pad_type padding= mojo::zero, int threads=1) : cols(m.cols), rows(m.rows), chans(m.chans), chan_aligned(m.chan_aligned), chan_stride(m.chan_stride), _size(m._size), _capacity(m._size)
    {
        x = new_x(_size);
        memcpy(x, m.x, sizeof(float)*_size);
        *this = pad(pad_cols, pad_rows, padding, threads);
    }

    ~matrix() { if (x) delete_x(); }

    // Deep copy of `num_chans` consecutive channels starting at start_channel.
    matrix get_chans(int start_channel, int num_chans=1) const
    {
        return matrix(cols,rows,num_chans,&x[start_channel*chan_stride]);
    }

    // if edge_pad==0, then the padded area is just 0.
    // if edge_pad==1 it fills with edge pixel colors
    // if edge_pad==2 it fills with median edge pixel color
    matrix pad(int dx, int dy, mojo::pad_type edge_pad = mojo::zero, int threads=1) const
    {
        return pad(dx, dy, dx, dy, edge_pad, threads);
    }

    // Pad with independent left/top and right/bottom margins.
    matrix pad(int dx, int dy, int dx_right, int dy_bottom, mojo::pad_type edge_pad = mojo::zero, int threads=1) const
    {
        matrix v(cols+dx+dx_right,rows+dy+dy_bottom,chans);//,NULL,this->chan_aligned);
        v.fill(0);

        //float *new_x = new float[chans*w*h];
#pragma omp parallel for num_threads(threads)
        for(int k=0; k<chans; k++)
        {
            const int v_chan_offset=k*v.chan_stride;
            const int chan_offset=k*chan_stride;
            // find median color of perimeter
            float median = 0.f;
            if (edge_pad == mojo::median_edge)
            {
                int perimeter = 2 * (cols + rows - 2);
                std::vector<float> d(perimeter);
                for (int i = 0; i < cols; i++)
                {
                    d[i] = x[i+ chan_offset];
                    d[i + cols] = x[i + cols*(rows - 1)+ chan_offset];
                }
                for (int i = 1; i < (rows - 1); i++)
                {
                    d[i + cols * 2] = x[cols*i+ chan_offset];
                    // fill from back so i dont need to calc index
                    d[perimeter - i] = x[cols - 1 + cols*i+ chan_offset];
                }
                std::nth_element(d.begin(), d.begin() + perimeter / 2, d.end());
                median = d[perimeter / 2];
                //for (int i = 0; i < v.rows*v.cols; i++) v.x[v_chan_offset + i] = solid_fill;
            }

            for(int j=0; j<rows; j++)
            {
                memcpy(&v.x[dx+(j+dy)*v.cols+v_chan_offset], &x[j*cols+chan_offset], sizeof(float)*cols);
                if(edge_pad== mojo::edge)
                {
                    // do left/right side
                    for(int i=0; i<dx; i++) v.x[i+(j+dy)*v.cols+v_chan_offset]=x[0+j*cols+chan_offset];
                    for (int i = 0; i<dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = x[(cols - 1) + j*cols + chan_offset];
                }
                else if (edge_pad == mojo::median_edge)
                {
                    for (int i = 0; i < dx; i++) v.x[i + (j + dy)*v.cols + v_chan_offset] = median;
                    for (int i = 0; i < dx_right; i++) v.x[i + dx + cols + (j + dy)*v.cols + v_chan_offset] = median;
                }
            }
            // top bottom pad
            if(edge_pad== mojo::edge)
            {
                for(int j=0; j<dy; j++) memcpy(&v.x[(j)*v.cols+v_chan_offset],&v.x[(dy)*v.cols+v_chan_offset], sizeof(float)*v.cols);
                for (int j = 0; j<dy_bottom; j++) memcpy(&v.x[(j + dy + rows)*v.cols + v_chan_offset], &v.x[(rows - 1 + dy)*v.cols + v_chan_offset], sizeof(float)*v.cols);
            }
            if (edge_pad == mojo::median_edge)
            {
                for (int j = 0; j<dy; j++)
                    for (int i = 0; i<v.cols; i++)
                        v.x[i + j*v.cols + v_chan_offset] = median;
                for (int j = 0; j<dy_bottom; j++)
                    for (int i = 0; i<v.cols; i++)
                        v.x[i + (j + dy + rows)*v.cols + v_chan_offset] = median;
            }
        }
        return v;
    }

    // Extract a w-by-h sub-image whose top-left corner is (dx, dy).
    matrix crop(int dx, int dy, int w, int h, int threads=1) const
    {
        matrix v(w,h,chans);

#pragma omp parallel for num_threads(threads)
        for(int k=0; k<chans; k++)
        {
            for(int j=0; j<h; j++)
            {
                memcpy(&v.x[j*w+k*v.chan_stride], &x[dx+(j+dy)*cols+k*chan_stride], sizeof(float)*w);
            }
        }
        return v;
    }

    // Shift the image by (dx, dy), filling exposed pixels per edge_pad.
    mojo::matrix shift(int dx, int dy, mojo::pad_type edge_pad=mojo::zero)
    {
        int orig_cols=cols;
        int orig_rows=rows;
        int off_x=abs(dx);
        int off_y=abs(dy);

        mojo::matrix shifted= pad(off_x, off_y, edge_pad);

        return shifted.crop(off_x-dx, off_y-dy,orig_cols,orig_rows);
    }

    // Horizontal mirror.
    mojo::matrix flip_cols()
    {
        mojo::matrix v(cols,rows,chans);
        for(int k=0; k<chans; k++)
            for(int j=0; j<rows; j++)
                for(int i=0; i<cols; i++)
                    v.x[i+j*cols+k*chan_stride]=x[(cols-i-1)+j*cols+k*chan_stride];
        return v;
    }
    // Vertical mirror.
    mojo::matrix flip_rows()
    {
        mojo::matrix v(cols, rows, chans);

        for (int k = 0; k<chans; k++)
            for (int j = 0; j<rows; j++)
                memcpy(&v.x[(rows-1-j)*cols + k*chan_stride],&x[j*cols + k*chan_stride], cols*sizeof(float));
        return v;
    }

    // Clamp every element (including channel-stride padding) to [min, max].
    void clip(float min, float max)
    {
        int s = chan_stride*chans;
        for (int i = 0; i < s; i++)
        {
            if (x[i] < min) x[i] = min;
            if (x[i] > max) x[i]=max;
        }
    }

    // Min/max over the image area of every channel; optionally also returns
    // the flat indices of the extrema.
    void min_max(float *min, float *max, int *min_i=NULL, int *max_i=NULL)
    {
        int s = rows*cols;
        int mini = 0;
        int maxi = 0;
        for (int c = 0; c < chans; c++)
        {
            const int t = chan_stride*c;
            for (int i = t; i < t+s; i++)
            {
                if (x[i] < x[mini]) mini = i;
                if (x[i] > x[maxi]) maxi = i;
            }
        }
        *min = x[mini];
        *max = x[maxi];
        if (min_i) *min_i = mini;
        if (max_i) *max_i = maxi;
    }

    // Mean over the image area of all channels (padding excluded).
    float mean()
    {
        const int s = rows*cols;
        int cnt = 0;// channel*s;
        float average = 0;
        for (int c = 0; c < chans; c++)
        {
            const int t = chan_stride*c;
            for (int i = 0; i < s; i++) average += x[i + t];
        }
        average = average / (float)(s*chans);
        return average;
    }
    // Subtract one channel's mean in place; returns the mean removed.
    float remove_mean(int channel)
    {
        int s = rows*cols;
        int offset = channel*chan_stride;
        float average=0;
        for(int i=0; i<s; i++) average+=x[i+offset];
        average= average/(float)s;
        for(int i=0; i<s; i++) x[i+offset]-=average;
        return average;
    }

    // Subtract the global mean from every element; returns the mean removed.
    float remove_mean()
    {
        float m=mean();
        int s = chan_stride*chans;
        //int offset = channel*s;
        for(int i=0; i<s; i++) x[i]-=m;
        return m;
    }
    void fill(float val)
    {
        for(int i=0; i<_size; i++) x[i]=val;
    }
    // Deterministic uniform fill in [-range, range] (fixed seed 0).
    void fill_random_uniform(float range)
    {
        std::mt19937 gen(0);
        std::uniform_real_distribution<float> dst(-range, range);
        for (int i = 0; i<_size; i++) x[i] = dst(gen);
    }
    // Deterministic normal fill with mean 0 and the given std (fixed seed 0).
    void fill_random_normal(float std)
    {
        std::mt19937 gen(0);
        std::normal_distribution<float> dst(0, std);
        for (int i = 0; i<_size; i++) x[i] = dst(gen);
    }

    // deep copy
    inline matrix& operator =(const matrix &m)
    {
        resize(m.cols, m.rows, m.chans, m.chan_aligned);
        memcpy(x,m.x,sizeof(float)*_size);
        // memcpy(empty_chan, m.empty_chan, chans);
        return *this;
    }

    int size() const {return _size;}

    // Reshape; only reallocates when the new size exceeds the capacity.
    void resize(int _w, int _h, int _c, int align_chans=0)
    {
        chan_aligned = align_chans;
        int new_stride = calc_chan_stride(_w,_h);
        int s = new_stride*_c;
        if(s>_capacity)
        {
            if(_capacity>0) delete_x();
            _size = s;
            _capacity=_size;
            x = new_x(_size);
        }
        cols = _w; rows = _h; chans = _c; _size = s; chan_stride = new_stride;
    }

    // dot vector to 2d mat
    inline matrix dot_1dx2d(const matrix &m_2d) const
    {
        mojo::matrix v(m_2d.rows, 1, 1);
        for(int j=0; j<m_2d.rows; j++) v.x[j]=dot(x,&m_2d.x[j*m_2d.cols],_size);
        return v;
    }

    // +=
    inline matrix& operator+=(const matrix &m2){
        for(int i = 0; i < _size; i++) x[i] += m2.x[i];
        return *this;
    }
    // -=
    inline matrix& operator-=(const matrix &m2)
    {
        for (int i = 0; i < _size; i++) x[i] -= m2.x[i];
        return *this;
    }
#ifndef MOJO_AVX
    // *= float
    inline matrix operator *=(const float v)
    {
        for (int i = 0; i < _size; i++) x[i] = x[i] * v;
        return *this;
    }
#else
    // *= float (SSE path)
    // NOTE(review): iterates 4-wide over _size with aligned loads; assumes
    // _size is a multiple of 4 -- confirm for non-chan_aligned matrices.
    inline matrix operator *=(const float v)
    {
        __m128 b;
        b = _mm_set_ps(v, v, v, v);
        for (int j = 0; j < _size; j += 4)
            _mm_store_ps(x + j, _mm_mul_ps(_mm_load_ps(x + j), b));
        return *this;
    }
#endif
    // *= matrix (element-wise)
    inline matrix operator *=(const matrix &v)
    {
        for (int i = 0; i < _size; i++) x[i] = x[i] * v.x[i];
        return *this;
    }
    // element-wise product
    inline matrix operator *(const matrix &v)
    {
        matrix T(cols, rows, chans);
        for (int i = 0; i < _size; i++) T.x[i] = x[i] * v.x[i];
        return T;
    }
    // * float
    inline matrix operator *(const float v)
    {
        matrix T(cols, rows, chans);
        for (int i = 0; i < _size; i++) T.x[i] = x[i] * v;
        return T;
    }
    // + float
    inline matrix operator +(const float v)
    {
        matrix T(cols, rows, chans);
        for (int i = 0; i < _size; i++) T.x[i] = x[i] + v;
        return T;
    }
    // +
    inline matrix operator +(matrix m2)
    {
        matrix T(cols,rows,chans);
        for(int i = 0; i < _size; i++) T.x[i] = x[i] + m2.x[i];
        return T;
    }
};

}// namespace
pvector.h
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details

#ifndef PVECTOR_H_
#define PVECTOR_H_

#include <algorithm>

/*
GAP Benchmark Suite
Class:  pvector
Author: Scott Beamer

Vector class with ability to not initialize or do initialize in parallel
 - std::vector (when resizing) will always initialize, and does it serially
 - When pvector is resized, new elements are uninitialized
 - Resizing is not thread-safe
*/

template <typename T_>
class pvector {
 public:
  typedef T_* iterator;

  // Empty vector: no storage allocated.
  pvector() : start_(nullptr), end_size_(nullptr), end_capacity_(nullptr) {}

  // Allocates num_elements; elements are deliberately left uninitialized.
  explicit pvector(size_t num_elements) {
    start_ = new T_[num_elements];
    end_size_ = start_ + num_elements;
    end_capacity_ = end_size_;
  }

  // Allocates and fills (fill runs in parallel).
  pvector(size_t num_elements, T_ init_val) : pvector(num_elements) {
    fill(init_val);
  }

  // Parallel element-wise copy from a half-open range.
  pvector(iterator copy_begin, iterator copy_end) : pvector(copy_end - copy_begin) {
    #pragma omp parallel for
    for (size_t i=0; i < capacity(); i++)
      start_[i] = copy_begin[i];
  }

  // don't want this to be copied, too much data to move
  pvector(const pvector &other) = delete;

  // prefer move because too much data to copy
  pvector(pvector &&other) : start_(other.start_), end_size_(other.end_size_), end_capacity_(other.end_capacity_) {
    other.start_ = nullptr;
    other.end_size_ = nullptr;
    other.end_capacity_ = nullptr;
  }

  // want move assignment
  pvector& operator= (pvector &&other) {
    if (this != &other) {
      ReleaseResources();
      start_ = other.start_;
      end_size_ = other.end_size_;
      end_capacity_ = other.end_capacity_;
      other.start_ = nullptr;
      other.end_size_ = nullptr;
      other.end_capacity_ = nullptr;
    }
    return *this;
  }

  // Frees the backing array (pointers are not reset; only safe from the
  // destructor or move assignment, which reassigns them immediately after).
  void ReleaseResources(){
    if (start_ != nullptr) {
      delete[] start_;
    }
  }

  ~pvector() {
    ReleaseResources();
  }

  // not thread-safe
  // Grows capacity to num_elements (never shrinks); existing elements are
  // copied over in parallel.
  void reserve(size_t num_elements) {
    if (num_elements > capacity()) {
      T_ *new_range = new T_[num_elements];
      #pragma omp parallel for
      for (size_t i=0; i < size(); i++)
        new_range[i] = start_[i];
      end_size_ = new_range + size();
      delete[] start_;
      start_ = new_range;
      end_capacity_ = start_ + num_elements;
    }
  }

  // prevents internal storage from being freed when this pvector is destructed
  // - used by Builder to reuse an EdgeList's space for in-place graph building
  void leak() {
    start_ = nullptr;
  }

  bool empty() {
    return end_size_ == start_;
  }

  // Logical clear only; elements are not destroyed and capacity is kept.
  void clear() {
    end_size_ = start_;
  }

  // New elements (beyond the old size) are left uninitialized.
  void resize(size_t num_elements) {
    reserve(num_elements);
    end_size_ = start_ + num_elements;
  }

  T_& operator[](size_t n) {
    return start_[n];
  }

  const T_& operator[](size_t n) const {
    return start_[n];
  }

  // Amortized O(1): doubles capacity when full. Not thread-safe.
  void push_back(T_ val) {
    if (size() == capacity()) {
      size_t new_size = capacity() == 0 ? 1 : capacity() * growth_factor;
      reserve(new_size);
    }
    *end_size_ = val;
    end_size_++;
  }

  // Parallel fill of the current [begin, end) range.
  void fill(T_ init_val) {
    #pragma omp parallel for
    for (T_* ptr=start_; ptr < end_size_; ptr++)
      *ptr = init_val;
  }

  size_t capacity() const {
    return end_capacity_ - start_;
  }

  size_t size() const {
    return end_size_ - start_;
  }

  iterator begin() const {
    return start_;
  }

  iterator end() const {
    return end_size_;
  }

  T_* data() const {
    return start_;
  }

  void swap(pvector &other) {
    std::swap(start_, other.start_);
    std::swap(end_size_, other.end_size_);
    std::swap(end_capacity_, other.end_capacity_);
  }


 private:
  T_* start_;         // beginning of storage
  T_* end_size_;      // one past the last constructed/used element
  T_* end_capacity_;  // one past the end of allocated storage
  static const size_t growth_factor = 2;
};

#endif  // PVECTOR_H_
par_csr_matvec.c
/*BHEADER********************************************************************** * Copyright (c) 2017, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322. * This file is part of AMG. See files README and COPYRIGHT for details. * * AMG is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * This software is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the * GNU General Public License for more details. * ***********************************************************************EHEADER*/ /****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. 
 *
 *****************************************************************************/

#include "_hypre_parcsr_mv.h"
#include <assert.h>

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvec
 *--------------------------------------------------------------------------*/

// y = alpha*A*x + beta*b
HYPRE_Int
hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex       alpha,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector    *x,
                                    HYPRE_Complex       beta,
                                    hypre_ParVector    *b,
                                    hypre_ParVector    *y )
{
   hypre_ParCSRCommHandle **comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix   *diag   = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix   *offd   = hypre_ParCSRMatrixOffd(A);
   hypre_Vector      *x_local  = hypre_ParVectorLocalVector(x);
   hypre_Vector      *b_local  = hypre_ParVectorLocalVector(b);
   hypre_Vector      *y_local  = hypre_ParVectorLocalVector(y);
   HYPRE_Int          num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int          num_cols = hypre_ParCSRMatrixGlobalNumCols(A);

   hypre_Vector      *x_tmp;   /* receive buffer for off-processor x entries */
   HYPRE_Int          x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_Int          b_size = hypre_ParVectorGlobalSize(b);
   HYPRE_Int          y_size = hypre_ParVectorGlobalSize(y);
   HYPRE_Int          num_vectors = hypre_VectorNumVectors(x_local);
   HYPRE_Int          num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int          ierr = 0;
   HYPRE_Int          num_sends, i, j, jv, index, start;

   HYPRE_Int          vecstride = hypre_VectorVectorStride( x_local );
   HYPRE_Int          idxstride = hypre_VectorIndexStride( x_local );

   HYPRE_Complex     *x_tmp_data, **x_buf_data;
   HYPRE_Complex     *x_local_data = hypre_VectorData(x_local);

   /*---------------------------------------------------------------------
    * Check for size compatibility.  ParMatvec returns ierr = 11 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 12 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 13 if both are true.
    *
    * Because temporary vectors are often used in ParMatvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   hypre_assert( idxstride>0 );

   if (num_cols != x_size)
      ierr = 11;

   if (num_rows != y_size || num_rows != b_size)
      ierr = 12;

   if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
      ierr = 13;

   hypre_assert( hypre_VectorNumVectors(b_local)==num_vectors );
   hypre_assert( hypre_VectorNumVectors(y_local)==num_vectors );

   if ( num_vectors==1 )
      x_tmp = hypre_SeqVectorCreate( num_cols_offd );
   else
   {
      hypre_assert( num_vectors>1 );
      x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors );
   }

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
   use_persistent_comm = num_vectors == 1;
   // JSP TODO: we can use persistent communication for multi-vectors,
   // but then we need different communication handles for different
   // num_vectors.
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif

   if ( use_persistent_comm )
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      /* persistent handle owns the recv buffer; x_tmp just aliases it */
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
      HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      hypre_assert(num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs));
      hypre_VectorData(x_tmp) = (HYPRE_Complex *)persistent_comm_handle->recv_data;
      hypre_SeqVectorSetDataOwner(x_tmp, 0);
#endif
   }
   else
   {
      comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*,num_vectors);
   }
   hypre_SeqVectorInitialize(x_tmp);
   x_tmp_data = hypre_VectorData(x_tmp);

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (!use_persistent_comm)
   {
      x_buf_data = hypre_CTAlloc( HYPRE_Complex*, num_vectors );
      for ( jv=0; jv<num_vectors; ++jv )
         x_buf_data[jv] = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart
                                        (comm_pkg, num_sends));
   }

   /* pack the local x entries that other processors need */
   if ( num_vectors==1 )
   {
      HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
      HYPRE_Int end   = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (i = begin; i < end; i++)
      {
#ifdef HYPRE_USING_PERSISTENT_COMM
         ((HYPRE_Complex *)persistent_comm_handle->send_data)[i - begin]
#else
         x_buf_data[0][i - begin]
#endif
            = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
      }
   }
   else
      for ( jv=0; jv<num_vectors; ++jv )
      {
         index = 0;
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
               x_buf_data[jv][index++]
                  = x_local_data[ jv*vecstride +
                                  idxstride*hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j) ];
         }
      }

   hypre_assert( idxstride==1 );
   /* ... The assert is because the following loop only works for 'column'
      storage of a multivector. This needs to be fixed to work more generally,
      at least for 'row' storage. This in turn, means either change CommPkg so
      num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put
      a stride in the logic of CommHandleCreate (stride either from a new arg or
      a new variable inside CommPkg).  Or put the num_vector iteration inside
      CommHandleCreate (perhaps a new multivector variant of it).
   */

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* start the halo exchange for off-processor x entries */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle);
#endif
   }
   else
   {
      for ( jv=0; jv<num_vectors; ++jv )
      {
         comm_handle[jv] = hypre_ParCSRCommHandleCreate
            ( 1, comm_pkg, x_buf_data[jv], &(x_tmp_data[jv*num_cols_offd]) );
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* local (diag) part overlaps with the communication above */
   hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle);
#endif
   }
   else
   {
      for ( jv=0; jv<num_vectors; ++jv )
      {
         hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
         comm_handle[jv] = NULL;
      }
      hypre_TFree(comm_handle);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* off-diagonal (halo) contribution: y += alpha * offd * x_tmp */
   if (num_cols_offd) hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   hypre_SeqVectorDestroy(x_tmp);
   x_tmp = NULL;
   if (!use_persistent_comm)
   {
      for ( jv=0; jv<num_vectors; ++jv ) hypre_TFree(x_buf_data[jv]);
      hypre_TFree(x_buf_data);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   return ierr;
}

HYPRE_Int
hypre_ParCSRMatrixMatvec( HYPRE_Complex       alpha,
                          hypre_ParCSRMatrix *A,
                          hypre_ParVector    *x,
                          HYPRE_Complex       beta,
                          hypre_ParVector    *y )
{
   /* convenience wrapper: in-place variant, y doubles as b */
   return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y);
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvecT
 *
 *   Performs y <- alpha * A^T * x + beta * y
 *
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixMatvecT( HYPRE_Complex       alpha,
                           hypre_ParCSRMatrix *A,
                           hypre_ParVector    *x,
                           HYPRE_Complex       beta,
                           hypre_ParVector    *y )
{
   hypre_ParCSRCommHandle **comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   hypre_Vector *y_tmp;    /* send buffer: offd^T * x contributions to remote rows */
   HYPRE_Int vecstride = hypre_VectorVectorStride( y_local );
   HYPRE_Int idxstride = hypre_VectorIndexStride( y_local );
   HYPRE_Complex *y_tmp_data, **y_buf_data;
   HYPRE_Complex *y_local_data = hypre_VectorData(y_local);

   HYPRE_Int         num_rows  = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int         num_cols  = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_Int         num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int         x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_Int         y_size = hypre_ParVectorGlobalSize(y);
   HYPRE_Int         num_vectors = hypre_VectorNumVectors(y_local);

   HYPRE_Int         i, j, jv, index, start, num_sends;

   HYPRE_Int         ierr = 0;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  MatvecT returns ierr = 1 if
    * length of X doesn't equal the number of rows of A,
    * ierr = 2 if the length of Y doesn't equal the number of
    * columns of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in MatvecT, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   if (num_rows != x_size)
      ierr = 1;

   if (num_cols != y_size)
      ierr = 2;

   if (num_rows != x_size && num_cols != y_size)
      ierr = 3;

   /*-----------------------------------------------------------------------
    *-----------------------------------------------------------------------*/

   if ( num_vectors==1 )
   {
      y_tmp = hypre_SeqVectorCreate(num_cols_offd);
   }
   else
   {
      y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd,num_vectors);
   }

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
   use_persistent_comm = num_vectors == 1;
   // JSP TODO: we can use persistent communication for multi-vectors,
   // but then we need different communication handles for different
   // num_vectors.
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      // JSP TODO: we should be also able to use persistent communication for multiple vectors
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg);
      HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      hypre_assert(num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs));

      hypre_VectorData(y_tmp) = (HYPRE_Complex *)persistent_comm_handle->send_data;
      hypre_SeqVectorSetDataOwner(y_tmp, 0);
#endif
   }
   else
   {
      comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*,num_vectors);
   }
   hypre_SeqVectorInitialize(y_tmp);

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (!use_persistent_comm)
   {
      y_buf_data = hypre_CTAlloc( HYPRE_Complex*, num_vectors );
      for ( jv=0; jv<num_vectors; ++jv )
         y_buf_data[jv] = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart
                                        (comm_pkg, num_sends));
   }
   y_tmp_data = hypre_VectorData(y_tmp);
   y_local_data = hypre_VectorData(y_local);

   hypre_assert( idxstride==1 ); /* only 'column' storage of multivectors
                                  * implemented so far */

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   /* off-diagonal transpose product feeds the send buffer y_tmp */
   if (num_cols_offd)
   {
      if (A->offdT)
      {
         // offdT is optional. Used only if it's present.
         hypre_CSRMatrixMatvec(alpha, A->offdT, x_local, 0.0, y_tmp);
      }
      else
      {
         hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp);
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle);
#endif
   }
   else
   {
      for ( jv=0; jv<num_vectors; ++jv )
      {
         /* this is where we assume multivectors are 'column' storage */
         comm_handle[jv] = hypre_ParCSRCommHandleCreate
            ( 2, comm_pkg, &(y_tmp_data[jv*num_cols_offd]), y_buf_data[jv] );
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* local (diag) transpose product overlaps with the communication above */
   if (A->diagT)
   {
      // diagT is optional. Used only if it's present.
      hypre_CSRMatrixMatvec(alpha, A->diagT, x_local, beta, y_local);
   }
   else
   {
      hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle);
#endif
   }
   else
   {
      for ( jv=0; jv<num_vectors; ++jv )
      {
         hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
         comm_handle[jv] = NULL;
      }
      hypre_TFree(comm_handle);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   /* unpack: scatter-add the received remote contributions into y */
   if ( num_vectors==1 )
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            y_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]
#ifdef HYPRE_USING_PERSISTENT_COMM
               += ((HYPRE_Complex *)persistent_comm_handle->recv_data)[index++];
#else
               += y_buf_data[0][index++];
#endif
      }
   }
   else
      for ( jv=0; jv<num_vectors; ++jv )
      {
         index = 0;
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j = start; j <
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) y_local_data[ jv*vecstride + idxstride*hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j) ] += y_buf_data[jv][index++]; } } hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv=0; jv<num_vectors; ++jv ) hypre_TFree(y_buf_data[jv]); hypre_TFree(y_buf_data); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_Int num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_Int x_size = hypre_ParVectorGlobalSize(x); HYPRE_Int y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. 
ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); if (num_cols != x_size) ierr = 11; if (num_rows != y_size) ierr = 12; if (num_cols != x_size && num_rows != y_size) ierr = 13; if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends)); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends)); if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); index = 0; for (i = 0; i < num_sends; i++) { start 
= hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data); hypre_TFree(int_buf_data); hypre_TFree(CF_marker_offd); } return ierr; }
Fig_5.6_staticSchedule.c
#include <stdio.h> #include <math.h> #include <omp.h> #define ITER 100000000 // use a smaller value if available memory is small void main() { int i; double A[ITER]; for (i = 0; i < ITER; ++i) A[i] = 2.0*i; #pragma omp parallel { int i; int id = omp_get_thread_num(); double tdata = omp_get_wtime(); #pragma omp for schedule(static) for (i = 1; i < ITER; i++) // notice i starts from 1 since // the denominator below cannot be 0 A[i] = A[i] * sqrt(i) / pow(sin(i), tan(i)); tdata = omp_get_wtime() - tdata; if (id == 0) printf("Time spent is %f sec \n", tdata); } }
GxB_deserialize_type_name.c
//------------------------------------------------------------------------------
// GxB_deserialize_type_name: return the name of a type
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

#include "GB.h"
#include "GB_serialize.h"

// GxB_deserialize_type_name extracts the type_name of the GrB_Type of the
// GrB_Matrix or GrB_Vector held in a serialized blob.  On input, type_name
// must point to a user-owned char array of size at least GxB_MAX_NAME_LEN (it
// must not point into the blob itself).  On output, type_name will contain a
// null-terminated string with the corresponding C type name.  If the blob
// holds a matrix of a built-in type, the name is returned as "bool" for
// GrB_BOOL, "uint8_t" for GrB_UINT8, "float complex" for GxB_FC32, etc.

GrB_Info GxB_deserialize_type_name  // return the type name of a blob
(
    // output:
    char *type_name,    // name of the type (char array of size at least
                        // GxB_MAX_NAME_LEN, owned by the user application).
    // input, not modified:
    const void *blob,       // the blob
    GrB_Index blob_size     // size of the blob
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GxB_deserialize_type_name (type_name, blob, blob_size)") ;
    GB_RETURN_IF_NULL (type_name) ;
    GB_RETURN_IF_NULL (blob) ;
    if (blob_size < GB_BLOB_HEADER_SIZE)
    {
        // blob is invalid: too small to even hold the header
        return (GrB_INVALID_OBJECT) ;
    }

    //--------------------------------------------------------------------------
    // get the blob header
    //--------------------------------------------------------------------------

    // s is the running read offset into the blob, advanced by GB_BLOB_READ
    // (which also declares the variable it reads into)
    size_t s = 0 ;
    GB_BLOB_READ (blob_size2, size_t) ;
    GB_BLOB_READ (typecode, int32_t) ;

    if (blob_size2 != (size_t) blob_size)
    {
        // blob is invalid: recorded size disagrees with the caller's size
        return (GrB_INVALID_OBJECT) ;
    }

    //--------------------------------------------------------------------------
    // get the type_name from the built-in type or the blob
    //--------------------------------------------------------------------------

    if (typecode >= GB_BOOL_code && typecode < GB_UDT_code)
    {
        // blob has a built-in type; the name is not in the blob
        GrB_Type blob_type = GB_code_type ((GB_Type_code) typecode, NULL) ;
        ASSERT (blob_type != NULL) ;
        memcpy (type_name, blob_type->name, GxB_MAX_NAME_LEN) ;
    }
    else if (typecode == GB_UDT_code)
    {
        // blob has a user-defined type; its name is stored after the header
        if (blob_size < GB_BLOB_HEADER_SIZE + GxB_MAX_NAME_LEN)
        {
            // blob is invalid: too small to hold the stored type name
            return (GrB_INVALID_OBJECT) ;
        }
        // get the name of the user type from the blob
        memcpy (type_name, ((GB_void *) blob) + GB_BLOB_HEADER_SIZE,
            GxB_MAX_NAME_LEN) ;
    }
    else
    {
        // blob is invalid: unknown typecode
        return (GrB_INVALID_OBJECT) ;
    }

    // this should already be in the blob, but set it to null just in case
    type_name [GxB_MAX_NAME_LEN-1] = '\0' ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    #pragma omp flush
    return (GrB_SUCCESS) ;
}
barrier.c
/* * Copyright 2015 Andrey Rodchenko, School of Computer Science, The University of Manchester * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #ifdef ARCH_MIC # include <immintrin.h> # include <zmmintrin.h> #endif #ifdef ARCH_X86_64 # include <emmintrin.h> #endif #ifdef OMP_BARRIER # include <omp.h> #endif #ifdef LIB_NUMA # include <numa.h> #endif #ifdef NBODY_BENCHMARK # include <math.h> #endif #include <sched.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <pthread.h> #include <sys/times.h> #include "barrier.h" const char ARCH_STR [ ] = #ifdef ARCH_MIC "mic"; #endif #ifdef ARCH_X86_64 "x86_64"; #endif #ifdef ARCH_ARMV7L "armv7l"; #endif const char BENCH_STR [ ] = #ifdef NBODY_BENCHMARK "nbody"; #endif #ifdef SANITY_BENCHMARK "sanity"; #endif #ifdef PURE_BENCHMARK "pure"; #endif #ifdef LDIMBL_BENCHMARK "ldimbl"; #endif #ifdef TMPL_BENCHMARK "tmpl"; #endif const char * BARRIER_STR = #ifdef PTHREAD_BARRIER "pthread"; #endif #ifdef SR_BARRIER "sr"; #endif #ifdef CTRGS_BARRIER "ctrgs"; #endif #ifdef CTRLS_BARRIER "ctrls"; #endif #ifdef DSMN_BARRIER "dsmn"; #endif #ifdef STNGS_BARRIER "stngs"; #endif #ifdef STNLS_BARRIER "stnls"; #endif #ifdef DTNGS_BARRIER "dtngs"; #endif #ifdef DTNLS_BARRIER "dtnls"; #endif #ifdef OMP_BARRIER "omp"; #endif #ifdef DSMNH_BARRIER "dsmnh"; #endif const char SPINNING_STR [ ] = #ifdef SPIN_SPINNING "spin"; #endif #ifdef HWYIELD_SPINNING "hwyield"; #endif #ifdef 
PTYIELD_SPINNING "ptyield"; #endif #ifdef PAUSE_SPINNING "pause"; #endif #ifdef WFE_SPINNING "wfe"; #endif static const char * HOSTNAME_STR; static const char * EXP_ID_STR; static bool INTERPOLATE_RADIX; static bool TOPOLOGY_AWARE_MAPPING; static bool USER_DEFINED_ACTIVE_PUS_SELECTION; static bool TOPOLOGY_NUMA_AWARE_ALLOC; static const char * bar_MachineIdToHostnameMap [ MACHINES_MAX_NUM ] = { "alpha", "beta", "gamma", "delta", "epsilpon", "zeta", "eta", "mic0", "omega" }; static const char * tpl_TopologyLevelName [ TPL_NODES_ALL_NUM ] = { "Machine", "Socket", "NumaNode", "Core", "PU", "Undefined", }; static int bar_ThreadIdToOsIdMap [ THREADS_MAX_NUM ] [ THREADS_MAX_NUM ]; static tpl_PU_t * bar_ThreadIdToPUMap [ THREADS_MAX_NUM ]; static tpl_PU_t * bar_OsIdToPUMap [ PUS_PER_MACHINE_MAX_NUM ]; /** * PUs topology descriptions set. */ static tpl_PU_t tpl_PUDescriptionsSet [ MACHINES_MAX_NUM ] [ SOCKETS_PER_MACHINE_MAX_NUM ] [ NUMANODES_PER_SOCKET_MAX_NUM ] [ CORES_PER_NUMANODE_MAX_NUM ] [ PUS_PER_CORE_MAX_NUM ] = { MACHINE_TOPOLOGY_INIT( SO( NU( CO( PU( 0)), CO( PU( 1)) ))), MACHINE_TOPOLOGY_INIT( SO( NU( CO( PU( 0), PU( 1)), CO( PU( 2), PU( 3)) ))), MACHINE_TOPOLOGY_INIT( SO( NU( CO( PU( 0), PU( 4)), CO( PU( 1), PU( 5)), CO( PU( 2), PU( 6)), CO( PU( 3), PU( 7)) ))), MACHINE_TOPOLOGY_INIT( SO( NU( CO( PU( 0), PU( 4)), CO( PU( 1), PU( 5)), CO( PU( 2), PU( 6)), CO( PU( 3), PU( 7)) ))), MACHINE_TOPOLOGY_INIT( SO( NU( CO( PU( 0), PU( 12)), CO( PU( 1), PU( 13)), CO( PU( 2), PU( 14)), CO( PU( 3), PU( 15)), CO( PU( 4), PU( 16)), CO( PU( 5), PU( 17)) )), SO( NU( CO( PU( 6), PU( 18)), CO( PU( 7), PU( 19)), CO( PU( 8), PU( 20)), CO( PU( 9), PU( 21)), CO( PU( 10), PU( 22)), CO( PU( 11), PU( 23)) ))), MACHINE_TOPOLOGY_INIT( SO( NU( CO( PU( 0), PU( 4)), CO( PU( 8), PU( 12)) ), NU( CO( PU( 16), PU( 20)), CO( PU( 24), PU( 28)), )), SO( NU( CO( PU( 1), PU( 5)), CO( PU( 9), PU( 13)) ), NU( CO( PU( 17), PU( 21)), CO( PU( 25), PU( 29)), )), SO( NU( CO( PU( 2), PU( 6)), CO( PU( 10), 
PU( 14)) ), NU( CO( PU( 18), PU( 22)), CO( PU( 26), PU( 30)), )), SO( NU( CO( PU( 3), PU( 7)), CO( PU( 11), PU( 15)) ), NU( CO( PU( 19), PU( 23)), CO( PU( 27), PU( 31)), ))), MACHINE_TOPOLOGY_INIT( SO( NU( CO( PU( 0), PU( 4)), CO( PU( 8), PU( 12)), CO( PU( 16), PU( 20)), CO( PU( 24), PU( 28)), ), NU( CO( PU( 32), PU( 36)), CO( PU( 40), PU( 44)), CO( PU( 48), PU( 52)), CO( PU( 56), PU( 60)), )), SO( NU( CO( PU( 1), PU( 5)), CO( PU( 9), PU( 13)), CO( PU( 17), PU( 21)), CO( PU( 25), PU( 29)), ), NU( CO( PU( 33), PU( 37)), CO( PU( 41), PU( 45)), CO( PU( 49), PU( 53)), CO( PU( 57), PU( 61)), )), SO( NU( CO( PU( 2), PU( 6)), CO( PU( 10), PU( 14)), CO( PU( 18), PU( 22)), CO( PU( 26), PU( 30)), ), NU( CO( PU( 34), PU( 38)), CO( PU( 42), PU( 46)), CO( PU( 50), PU( 54)), CO( PU( 58), PU( 62)), )), SO( NU( CO( PU( 3), PU( 7)), CO( PU( 11), PU( 15)), CO( PU( 19), PU( 23)), CO( PU( 27), PU( 31)), ), NU( CO( PU( 35), PU( 39)), CO( PU( 43), PU( 47)), CO( PU( 51), PU( 55)), CO( PU( 59), PU( 63)), ))), MACHINE_TOPOLOGY_INIT( SO( NU( CO( PU( 0), PU( 237), PU( 238), PU( 239)), CO( PU( 1), PU( 2), PU( 3), PU( 4)), CO( PU( 5), PU( 6), PU( 7), PU( 8)), CO( PU( 9), PU( 10), PU( 11), PU( 12)), CO( PU( 13), PU( 14), PU( 15), PU( 16)), CO( PU( 17), PU( 18), PU( 19), PU( 20)), CO( PU( 21), PU( 22), PU( 23), PU( 24)), CO( PU( 25), PU( 26), PU( 27), PU( 28)), CO( PU( 29), PU( 30), PU( 31), PU( 32)), CO( PU( 33), PU( 34), PU( 35), PU( 36)), CO( PU( 37), PU( 38), PU( 39), PU( 40)), CO( PU( 41), PU( 42), PU( 43), PU( 44)), CO( PU( 45), PU( 46), PU( 47), PU( 48)), CO( PU( 49), PU( 50), PU( 51), PU( 52)), CO( PU( 53), PU( 54), PU( 55), PU( 56)), CO( PU( 57), PU( 58), PU( 59), PU( 60)), CO( PU( 61), PU( 62), PU( 63), PU( 64)), CO( PU( 65), PU( 66), PU( 67), PU( 68)), CO( PU( 69), PU( 70), PU( 71), PU( 72)), CO( PU( 73), PU( 74), PU( 75), PU( 76)), CO( PU( 77), PU( 78), PU( 79), PU( 80)), CO( PU( 81), PU( 82), PU( 83), PU( 84)), CO( PU( 85), PU( 86), PU( 87), PU( 88)), CO( PU( 89), PU( 90), PU( 91), 
PU( 92)), CO( PU( 93), PU( 94), PU( 95), PU( 96)), CO( PU( 97), PU( 98), PU( 99), PU( 100)), CO( PU( 101), PU( 102), PU( 103), PU( 104)), CO( PU( 105), PU( 106), PU( 107), PU( 108)), CO( PU( 109), PU( 110), PU( 111), PU( 112)), CO( PU( 113), PU( 114), PU( 115), PU( 116)), CO( PU( 117), PU( 118), PU( 119), PU( 120)), CO( PU( 121), PU( 122), PU( 123), PU( 124)), CO( PU( 125), PU( 126), PU( 127), PU( 128)), CO( PU( 129), PU( 130), PU( 131), PU( 132)), CO( PU( 133), PU( 134), PU( 135), PU( 136)), CO( PU( 137), PU( 138), PU( 139), PU( 140)), CO( PU( 141), PU( 142), PU( 143), PU( 144)), CO( PU( 145), PU( 146), PU( 147), PU( 148)), CO( PU( 149), PU( 150), PU( 151), PU( 152)), CO( PU( 153), PU( 154), PU( 155), PU( 156)), CO( PU( 157), PU( 158), PU( 159), PU( 160)), CO( PU( 161), PU( 162), PU( 163), PU( 164)), CO( PU( 165), PU( 166), PU( 167), PU( 168)), CO( PU( 169), PU( 170), PU( 171), PU( 172)), CO( PU( 173), PU( 174), PU( 175), PU( 176)), CO( PU( 177), PU( 178), PU( 179), PU( 180)), CO( PU( 181), PU( 182), PU( 183), PU( 184)), CO( PU( 185), PU( 186), PU( 187), PU( 188)), CO( PU( 189), PU( 190), PU( 191), PU( 192)), CO( PU( 193), PU( 194), PU( 195), PU( 196)), CO( PU( 197), PU( 198), PU( 199), PU( 200)), CO( PU( 201), PU( 202), PU( 203), PU( 204)), CO( PU( 205), PU( 206), PU( 207), PU( 208)), CO( PU( 209), PU( 210), PU( 211), PU( 212)), CO( PU( 213), PU( 214), PU( 215), PU( 216)), CO( PU( 217), PU( 218), PU( 219), PU( 220)), CO( PU( 221), PU( 222), PU( 223), PU( 224)), CO( PU( 225), PU( 226), PU( 227), PU( 228)), CO( PU( 229), PU( 230), PU( 231), PU( 232)), CO( PU( 233), PU( 234), PU( 235), PU( 236)), ))), MACHINE_TOPOLOGY_INIT( SO( NU( CO( PU( 0)), CO( PU( 1)) ))) }; /** * Machine topology summaries set. 
*/
/* Per-machine topology summaries, indexed the same way as
   bar_MachineIdToHostnameMap.  Judging from the accessors used elsewhere in
   this file, the positional arguments are: topology type, SMT kind,
   sockets/machine, NUMA nodes/socket, cores/NUMA node, PUs/core, NUMA malloc
   policy, connection-avoidance level, plus one trailing count —
   TODO(review): confirm against the MACHINE_SUMMARY_INIT macro in barrier.h. */
static tpl_MachineSummary_t tpl_MachineSummariesSet [ MACHINES_MAX_NUM ] = {
    MACHINE_SUMMARY_INIT( TPL_TYPE_HOMOGENEOUS_SYMMETRIC, TPL_SMT_NONE, 1, 1, 2, 1, MALLOC_ANY, AVOID_INTERNUMANODE_CONNECTIONS, 2),
    MACHINE_SUMMARY_INIT( TPL_TYPE_HOMOGENEOUS_SYMMETRIC, TPL_SMT_HT, 1, 1, 2, 2, MALLOC_ANY, AVOID_INTERNUMANODE_CONNECTIONS, 4),
    MACHINE_SUMMARY_INIT( TPL_TYPE_HOMOGENEOUS_SYMMETRIC, TPL_SMT_HT, 1, 1, 4, 2, MALLOC_ANY, AVOID_INTERNUMANODE_CONNECTIONS, 8),
    MACHINE_SUMMARY_INIT( TPL_TYPE_HOMOGENEOUS_SYMMETRIC, TPL_SMT_HT, 1, 1, 4, 2, MALLOC_ANY, AVOID_INTERNUMANODE_CONNECTIONS, 8),
    MACHINE_SUMMARY_INIT( TPL_TYPE_HOMOGENEOUS_SYMMETRIC, TPL_SMT_HT, 2, 1, 6, 2, MALLOC_ON_W, AVOID_INTERNUMANODE_CONNECTIONS, 12),
    MACHINE_SUMMARY_INIT( TPL_TYPE_HOMOGENEOUS_SYMMETRIC, TPL_SMT_CMT, 4, 2, 2, 2, MALLOC_ANY, AVOID_INTERCORE_CONNECTIONS, 4),
    MACHINE_SUMMARY_INIT( TPL_TYPE_HOMOGENEOUS_SYMMETRIC, TPL_SMT_CMT, 4, 2, 4, 2, MALLOC_ANY, AVOID_INTERCORE_CONNECTIONS, 4),
    MACHINE_SUMMARY_INIT( TPL_TYPE_HOMOGENEOUS_SYMMETRIC, TPL_SMT_MIC, 1, 1, 60, 4, MALLOC_ANY, AVOID_INTERCORE_CONNECTIONS, 60),
    MACHINE_SUMMARY_INIT( TPL_TYPE_HOMOGENEOUS_SYMMETRIC, TPL_SMT_NONE, 1, 1, 2, 1, MALLOC_ANY, AVOID_INTERNUMANODE_CONNECTIONS, 2)
};
/**
 * Machine description.
*/
/** Description of the machine this process runs on, filled in at startup. */
static tpl_MachineDescription_t machineDescription;
/* One statically allocated barrier instance set per configured flavour. */
#if defined( PTHREAD_BARRIER)
static pthread_barrier_t bar_pthreadBarrier [ BARRIERS_MAX_NUM ];
#endif
#ifdef SR_BARRIER
static sr_barrier_t bar_srBarrier [ BARRIERS_MAX_NUM ];
#endif
#ifdef TREE_BARRIER
static tree_barrier_t bar_treeBarrier [ BARRIERS_MAX_NUM ];
#endif
#if defined( DSMN_BARRIER) || defined( DSMNH_BARRIER)
static dsmn_barrier_t bar_dsmnBarrier [ BARRIERS_MAX_NUM ];
#endif
#ifdef DSMNH_BARRIER
/* Hybrid barrier: one sense-reversing barrier per (barrier, group) pair. */
static sr_barrier_t bar_srBarrier [ BARRIERS_MAX_NUM ][ THREADS_MAX_NUM ];
#endif
/**
 * Online cpu set
 */
static cpu_set_t bar_onlineCpuSet;
#ifdef SANITY_BENCHMARK
/**
 * Array for sanity testing of barriers semantics
 */
static int * bar_TestArray [ THREADS_MAX_NUM ];
#endif /* SANITY_BENCHMARK */
#ifdef NBODY_BENCHMARK
/**
 * Data buffer.
 */
static bar_Particle_t bar_ParticlesBuf [ THREADS_MAX_NUM ];
#endif /* NBODY_BENCHMARK */
/** Assertion helper; compiled out entirely when NASSERT is defined. */
static inline void bar_Assert( int i)
{
#ifndef NASSERT
    assert( i);
#endif
}
/** Report an unrecoverable internal error and terminate the process. */
static inline void bar_InternalError( char * fileName, unsigned lineNo)
{
    fprintf( stderr, "Internal error %s:%u !\n", fileName, lineNo);
    exit( 1);
}
/** Copy the cached set of online CPUs into *onlineCpuSet. */
static inline void sys_SetOnlineCpuSet( cpu_set_t * onlineCpuSet)
{
    * onlineCpuSet = bar_onlineCpuSet;
}
/* Lightweight memory manager.
*/
#define MEM_INIT_SIZE ( 8 * 1024 * 4096)
/* Bump-allocator arenas: one per NUMA node when NUMA-aware allocation is in
   effect, otherwise a single malloc'd arena. */
static void * baseMemNuma [ NUMANODES_PER_MACHINE_MAX_NUM ];
static void * curMemNuma [ NUMANODES_PER_MACHINE_MAX_NUM ];
static void * baseMem;
static void * curMem;
unsigned memSize;
/**
 * Initialize the lightweight bump allocator.  With LIB_NUMA, a known machine
 * and more than one NUMA node, one arena of MEM_INIT_SIZE bytes is placed on
 * each node; otherwise a single cache-line-aligned arena is malloc'd.
 */
static inline void bar_MemInit( )
{
#ifdef LIB_NUMA
    if ( TOPOLOGY_NUMA_AWARE_ALLOC &&
         (machineDescription.machineId != UNDEFINED_MACHINE_ID) &&
         ((machineDescription.summary->socketsPerMachineNum * machineDescription.summary->numaNodesPerSocketNum) > 1)) {
        int numaId;
        for ( numaId = 0; numaId < (machineDescription.summary->socketsPerMachineNum * machineDescription.summary->numaNodesPerSocketNum); numaId++ ) {
            /* NOTE(review): the stored base is offset by CCL_SIZE * numaId
               (presumably to stagger cache-line placement across nodes), but
               the allocation is still only MEM_INIT_SIZE bytes while the
               overflow check in bar_MemAlloc compares against MEM_INIT_SIZE
               from the offset base — the last CCL_SIZE * numaId bytes can lie
               outside the allocation; verify. */
            baseMemNuma [ numaId ] = numa_alloc_onnode( MEM_INIT_SIZE, numaId) + (CCL_SIZE * numaId);
            curMemNuma [ numaId ] = baseMemNuma [ numaId ];
        }
        memSize = MEM_INIT_SIZE;
    } else
#endif /* NUMA_ALLOC */
    {
        baseMem = (void *) malloc( MEM_INIT_SIZE * NUMANODES_PER_MACHINE_MAX_NUM);
        /* Round the cursor up to the next cache-line boundary. */
        curMem = (void *) ((((size_t) baseMem + CCL_SIZE - 1) / CCL_SIZE) * CCL_SIZE);
        memSize = MEM_INIT_SIZE;
    }
}
/**
 * Bump-allocate memSize bytes rounded up to whole cache lines.
 * threadRId/threadWId identify the predominant reader and writer threads of
 * the allocation; under the NUMA policy one of them selects the node the
 * memory is taken from, and that thread's id is reported via *threadAllocId
 * (when non-NULL).  Note: the memSize parameter shadows the file-scope
 * 'memSize' variable.
 */
static inline void * bar_MemAlloc( int memSize, int threadRId, int threadWId, int * threadAllocId)
{
    int deltaMem = (((memSize + CCL_SIZE - 1) / CCL_SIZE) * CCL_SIZE);
    void * retMem = 0;
#ifdef LIB_NUMA
    if ( TOPOLOGY_NUMA_AWARE_ALLOC &&
         (machineDescription.machineId != UNDEFINED_MACHINE_ID) &&
         ((machineDescription.summary->socketsPerMachineNum * machineDescription.summary->numaNodesPerSocketNum) > 1)) {
        int threadId = (machineDescription.summary->numaMallocPolicy == MALLOC_ON_W) ?
threadWId : threadRId;
        int numaId = bar_ThreadIdToPUMap [ threadId ]->numaOsId;
        if ( threadAllocId != NULL ) {
            (* threadAllocId) = threadId;
        }
        retMem = curMemNuma [ numaId ];
        curMemNuma [ numaId ] += deltaMem;
        /* Arena exhaustion is fatal: this allocator cannot grow. */
        if ( (curMemNuma [ numaId ] - baseMemNuma [ numaId ]) > MEM_INIT_SIZE ) {
            bar_InternalError( __FILE__, __LINE__);
        }
    } else
#endif /* NUMA_ALLOC */
    {
        retMem = curMem;
        curMem += deltaMem;
        if ( (curMem - baseMem) > (MEM_INIT_SIZE * NUMANODES_PER_MACHINE_MAX_NUM) ) {
            bar_InternalError( __FILE__, __LINE__);
        }
    }
    return retMem;
}
/** Reset all arena cursors so the arenas can be reused from scratch. */
static inline void bar_MemReuse( )
{
#ifdef LIB_NUMA
    if ( TOPOLOGY_NUMA_AWARE_ALLOC &&
         (machineDescription.machineId != UNDEFINED_MACHINE_ID) &&
         ((machineDescription.summary->socketsPerMachineNum * machineDescription.summary->numaNodesPerSocketNum) > 1)) {
        int numaId;
        for ( numaId = 0; numaId < (machineDescription.summary->socketsPerMachineNum * machineDescription.summary->numaNodesPerSocketNum); numaId++ ) {
            curMemNuma [ numaId ] = baseMemNuma [ numaId ];
        }
    } else
#endif /* NUMA_ALLOC */
    {
        curMem = (void *) ((((size_t) baseMem + CCL_SIZE - 1) / CCL_SIZE) * CCL_SIZE);
    }
}
/** Release the arenas allocated by bar_MemInit. */
static inline void bar_MemFini( )
{
#ifdef LIB_NUMA
    if ( TOPOLOGY_NUMA_AWARE_ALLOC &&
         (machineDescription.machineId != UNDEFINED_MACHINE_ID) &&
         ((machineDescription.summary->socketsPerMachineNum * machineDescription.summary->numaNodesPerSocketNum) > 1)) {
        int numaId;
        for ( numaId = 0; numaId < (machineDescription.summary->socketsPerMachineNum * machineDescription.summary->numaNodesPerSocketNum); numaId++ ) {
            /* NOTE(review): baseMemNuma holds the CCL_SIZE*numaId-offset
               pointer (see bar_MemInit), not the address numa_alloc_onnode
               returned, so this numa_free gets a pointer inside the mapping
               — verify against the numa(3) contract. */
            numa_free( baseMemNuma [ numaId ], MEM_INIT_SIZE);
        }
    } else
#endif /* NUMA_ALLOC */
    {
        free( baseMem);
    }
}
/** Smallest e with 2^e >= threadsNum (threadsNum must be positive). */
static inline int math_log2Ceil( int threadsNum)
{
    int res = 0;
    bar_Assert( threadsNum > 0);
    while ( (1 << res) < threadsNum ) {
        res++;
    }
    return res;
}
/** Smallest e with n^e >= threadsNum (threadsNum must be positive). */
static inline int math_logNCeil( int n, int threadsNum)
{
    int res = 0;
    int ceil = 1;
    bar_Assert( threadsNum > 0);
    while ( ceil < threadsNum ) {
        ceil *= n;
        res ++;
    }
    return res;
}
#ifdef ARCH_X86_FAMILY
static void
x86_Cpuid( int in_eax, int * eax, int * ebx, int * ecx, int * edx )
{
    /* Execute CPUID with EAX = in_eax, preserving RBX (reserved under some
       ABIs/PIC).
       NOTE(review): the output constraints bind the pointer variables
       themselves ("=a"( eax) overwrites the local pointer), so the results
       are never stored through the caller-supplied pointers — verify intent
       before relying on this function's outputs. */
    asm volatile( "pushq %%rbx \n\t"
                  "cpuid \n\t"
                  "movq %%rbx, %1\n\t"
                  "popq %%rbx \n\t"
                  : "=a"( eax), "=r"(ebx), "=c"(ecx), "=d"(edx)
                  : "a"( in_eax)
                  : "cc" );
}
#endif
/** Return the current x86 privilege ring from the low bits of CS
 *  (unsupported architectures abort). */
static inline int sys_GetPrivilegeLevel( )
{
# ifdef ARCH_ARM_FAMILY
    bar_InternalError( __FILE__, __LINE__);
# else
# ifdef ARCH_X86_FAMILY
    {
        short int csReg;
        short int ringMask = X86_RINGS_MASK;
        asm volatile( " mov %%cs, %0\n\t" : "=r"( csReg) );
        return csReg & ringMask;
    }
# else
    bar_InternalError( __FILE__, __LINE__);
# endif
# endif
}
#ifdef ARCH_LOAD_NC
# ifdef ARCH_X86_64
/* Non-cache-polluting loads via MOVNTDQA (streaming load). */
static inline int load_nc_int( void * addr)
{
    int res;
    __m128i siVec;
    asm volatile( " movntdqa %1, %0\n\t"
                  : "=x"(siVec)
                  : "m"(*(__m128i *)addr) );
    res = _mm_cvtsi128_si32( siVec);
    return res;
}
static inline bool load_nc_bool( void * addr)
{
    return load_nc_int( addr);
}
static inline int_max_ma_t load_nc_int_max_ma( void * addr)
{
    return load_nc_int( addr);
}
# endif
#endif
#ifdef ARCH_STORE_NR_NGO
# ifdef ARCH_MIC
/* MIC no-read-hint, non-globally-ordered stores; unless the REFINED variant
   is selected, each store is followed by a locked RSP add acting as a store
   fence. */
static inline void store_nr_ngo_int( void * addr, int data)
{
    __m512i siVec = _mm512_set1_epi32( data);
    _mm512_storenrngo_ps( addr, _mm512_castsi512_ps( siVec));
# ifndef ARCH_STORE_NR_NGO_REFINED
    asm volatile( "lock; addl $0,(%rsp)\n");
# endif
}
static inline void store_nr_ngo_bool( void * addr, bool data)
{
    store_nr_ngo_int( addr, data);
}
static inline void store_nr_ngo_int_max_ma( void * addr, int_max_ma_t data)
{
    __m512i siVec = _mm512_set1_epi64( (__int64)data);
    bar_Assert( sizeof( __int64) <= HW_MAX_MA_SIZE_IN_BITS);
    _mm512_storenrngo_ps( addr, _mm512_castsi512_ps( siVec));
# ifndef ARCH_STORE_NR_NGO_REFINED
    asm volatile( "lock; addl $0,(%rsp)\n");
# endif
}
# endif /* ARCH_MIC */
#endif /* ARCH_STORE_NR_NGO */
#ifdef ARCH_STORE_NR
# ifdef ARCH_MIC
/* MIC no-read-hint stores. */
static inline void store_nr_int( void * addr, int data)
{
    __m512i siVec = _mm512_set1_epi32( data);
    _mm512_storenr_ps( addr, _mm512_castsi512_ps( siVec));
}
static inline void store_nr_bool( void *
addr, bool data) { store_nr_int( addr, data); } static inline void store_nr_int_max_ma( void * addr, int_max_ma_t data) { __m512i siVec = _mm512_set1_epi64( (__int64)data); bar_Assert( sizeof( __int64) <= HW_MAX_MA_SIZE_IN_BITS); _mm512_storenr_ps( addr, _mm512_castsi512_ps( siVec)); } # endif /* ARCH_MIC */ # ifdef ARCH_X86_64 static inline void store_nr_int( void * addr, int data) { __m128i siVec = _mm_cvtsi32_si128( data); _mm_stream_si128( addr, siVec); } static inline void store_nr_bool( void * addr, bool data) { store_nr_int( addr, data); } static inline void store_nr_int_max_ma( void * addr, int_max_ma_t data) { __m128i siVec = _mm_cvtsi64_si128( data); _mm_stream_si128( addr, siVec); } # endif /* ARCH_X86_64 */ #endif /* ARCH_STORE_NR */ inline static void memory_barrier( ) { # ifdef ARCH_ARM_FAMILY asm volatile ( "dmb" : : : "memory"); # else # if defined( ARCH_X86_FAMILY) && !defined( ARCH_MIC) asm volatile ( "mfence" : : : "memory"); # else # if !defined( ARCH_MIC) bar_InternalError( __FILE__, __LINE__); # endif # endif # endif } #ifdef YIELD_SPINNING inline static void spinning_thread_yield( ) { #ifdef HWYIELD_SPINNING # ifdef ARCH_ARM_FAMILY asm volatile ( "yield\n\t"); # else # ifdef ARCH_X86_FAMILY asm volatile ( "hlt\n\t"); # else bar_InternalError( __FILE__, __LINE__); # endif # endif #endif /* HWYIELD_SPINNING */ #ifdef PTYIELD_SPINNING pthread_yield( ); #endif /* PTYIELD_SPINNING */ } #endif #ifdef PAUSE_SPINNING inline static void spinning_pause( ) { # ifdef ARCH_ARM_FAMILY asm volatile ( "wfi\n\t"); # else # ifdef ARCH_X86_FAMILY # ifdef ARCH_MIC _mm_delay_32( ARCH_MIC_DELAY); # else asm volatile ( "pause\n\t"); # endif # else bar_InternalError( __FILE__, __LINE__); # endif # endif } #endif /* PAUSE_SPINNING */ #ifdef WFE_SPINNING inline static void spinning_thread_wfe_init( void * mem) { # ifdef ARCH_ARM_FAMILY # else # if defined( ARCH_X86_FAMILY) && defined( ARCH_X86_MONITOR_MWAIT) { asm volatile( " mov %0, %%rax\n\t" " mov $0x0, %%rcx\n\t" 
" mov $0x0, %%rdx\n\t" " monitor %%rax, %%rcx, %%rdx\n\t" : : "r"( mem) : "%rax", "%rcx", "%rdx"); } # else bar_InternalError( __FILE__, __LINE__); # endif # endif } inline static void spinning_thread_wfe_wait( ) { # ifdef ARCH_ARM_FAMILY asm volatile ( "wfe\n"); # else # if defined( ARCH_X86_FAMILY) && defined( ARCH_X86_MONITOR_MWAIT) asm volatile( " movl 0, %%eax\n\t" " movl 0, %%ecx\n\t" " mwait\n" : : : "%eax", "%ecx"); # else bar_InternalError( __FILE__, __LINE__); # endif # endif } inline static void spinning_thread_wfe_send( ) { # ifdef ARCH_ARM_FAMILY asm volatile ( "sev\n"); # else # if defined( ARCH_X86_FAMILY) && defined( ARCH_X86_MONITOR_MWAIT) # else bar_InternalError( __FILE__, __LINE__); # endif # endif } #endif inline static int load_linked( volatile int * x) { int val = 0; #ifdef ARCH_LL_SC # ifdef ARCH_ARM_FAMILY asm volatile ("ldrex %0, [%1]\n\t" : "=r"( val) : "r"( x) : "memory"); return val; # else /* ARCH_ARM_FAMILY */ bar_InternalError( __FILE__, __LINE__); return val; # endif /* !ARCH_ARM_FAMILY */ #else /* ARCH_LL_SC */ bar_InternalError( __FILE__, __LINE__); return val; #endif /* !ARCH_LL_SC */ } inline static bool store_conditional( volatile int * x, int newVal) { #ifdef ARCH_LL_SC # ifdef ARCH_ARM_FAMILY int res; asm volatile ("strex %0, %1, [%2]\n\t" : "=r"( res) : "r"( newVal), "r"( x) : "memory", "r0" ); return !res; # else /* ARCH_ARM_FAMILY */ bar_InternalError( __FILE__, __LINE__); return RETURN_FAIL; # endif /* !ARCH_ARM_FAMILY */ #else /* ARCH_LL_SC */ bar_InternalError( __FILE__, __LINE__); return RETURN_FAIL; #endif /* !ARCH_LL_SC */ } inline static bool compare_and_swap( volatile int * x, int oldVal, int newVal) { #ifdef ARCH_CAS # ifdef ARCH_X86_FAMILY int res; __asm__ __volatile__ ( " lock\n\t" " cmpxchgl %2,%1\n\t" " sete %%al\n\t" " movzbl %%al, %0\n\t" : "=q" (res), "=m" (*x) : "r" (newVal), "m" (*x), "a" (oldVal) : "memory"); return (bool)res; # endif #else /* ARCH_CAS */ # ifdef ARCH_LL_SC { bool noSucc; do { if ( 
load_linked( x) != oldVal ) { return FALSE; } noSucc = store_conditional( x, newVal); } while ( !noSucc ); return TRUE; } # else /* ARCH_LL_SC */ bar_InternalError( __FILE__, __LINE__); return RETURN_FAIL; # endif /* !ARCH_LL_SC */ #endif /* !ARCH_CAS */ } inline static int fetch_and_add( volatile int * variable, int inc) { #ifdef ARCH_FETCH_AND_ADD # ifdef ARCH_X86_FAMILY asm volatile( "lock; xaddl %0, %1;\n\t" :"=r" (inc) /* Output */ :"m" (*variable), "0" (inc) /* Input */ :"memory" ); return inc; # else bar_InternalError( __FILE__, __LINE__); # endif #else /* ARCH_FETCH_AND_ADD */ # ifdef ARCH_LL_SC { bool noSucc; int val; do { val = load_linked( variable); noSucc = store_conditional( variable, val + inc); } while ( !noSucc ); return val; } # else /* ARCH_LL_SC */ # ifdef ARCH_CAS { bool succ = FALSE; do { int val; val = *variable; succ = compare_and_swap( variable, val, val + inc); } while ( !succ ); } # else bar_InternalError( __FILE__, __LINE__); # endif # endif /* !ARCH_LL_SC */ #endif /* !ARCH_FETCH_AND_ADD */ } #if defined( SR_BARRIER) || defined( DSMNH_BARRIER) static void sr_barrier_init( sr_barrier_t * sr_barrier, void * dummy, int threadsNum, int barrierId) { sr_barrier->sense = 0; sr_barrier->count = threadsNum; sr_barrier->threadsNum = threadsNum; sr_barrier->barrierId = barrierId; } static inline void sr_barrier_set_sense( volatile bool * addr, bool sense) { #ifdef ARCH_STORE_NR_NGO store_nr_ngo_bool( (void *) addr, sense); #else # ifdef ARCH_STORE_NR store_nr_bool( (void *) addr, sense); # else (* addr) = sense; # endif #endif } static inline bool sr_barrier_load_bool( volatile bool * addr) { #ifdef ARCH_LOAD_NC return load_nc_bool( (void *) addr); #else return (* addr); #endif } static inline bool sr_barrier_load_sense( volatile bool * sense_addr) { return sr_barrier_load_bool( sense_addr); } static inline void sr_barrier_set_count( volatile bool * addr, int count) { #ifdef ARCH_STORE_NR_NGO store_nr_ngo_int( (void *) addr, count); #else # ifdef 
ARCH_STORE_NR
    store_nr_int( (void *) addr, count);
# else
    (* addr) = count;
# endif
#endif
}
#ifdef DSMNH_BARRIER
/* Forward declaration: in the hybrid barrier the last arrival of each group
   synchronizes with the other groups through a dissemination barrier. */
static inline void dsmn_barrier_wait( dsmn_barrier_t * dsmn_barrier, tls_Data_t * dsmn_barrier_tls_data);
#endif
/**
 * Wait on a sense-reversing barrier: every thread decrements the shared
 * counter; the last arrival resets the counter and flips the shared sense,
 * releasing the other threads, which spin on the sense flag.  Each thread
 * keeps a private sense in TLS and inverts it on exit.
 */
static inline void sr_barrier_wait( sr_barrier_t * sr_barrier, tls_Data_t * tlsData)
{
#ifdef DSMNH_BARRIER
    int * senseP = & tlsData->senseSR[ sr_barrier->barrierId ][ tlsData->groupId ].data;
#else
    int * senseP = & tlsData->sense[ sr_barrier->barrierId ].data;
#endif
    int currCount = fetch_and_add( & (sr_barrier->count), -1);
    int currSense = *senseP;
    if ( currCount == 1 ) {
        /* Last arrival: (hybrid case) synchronize the group leaders first,
           then reset the counter and release the waiters. */
#ifdef DSMNH_BARRIER
        dsmn_barrier_wait( & bar_dsmnBarrier [ sr_barrier->barrierId ], tlsData->dsmnTlsData);
#endif
        sr_barrier_set_count( & (sr_barrier->count), sr_barrier->threadsNum);
        /* Compiler barrier: the count reset must not be reordered after the
           sense flip below. */
        asm volatile("": : :"memory");
#ifdef ARCH_STORE_NR_NGO_REFINED
        asm volatile( "lock; addl $0,(%rsp)\n");
#endif
        sr_barrier_set_sense( & (sr_barrier->sense), currSense);
#ifdef WFE_SPINNING
        spinning_thread_wfe_send( );
#endif
    } else {
#ifdef WFE_SPINNING
        if ( currSense != sr_barrier_load_sense( & (sr_barrier->sense)) ) {
            spinning_thread_wfe_init( (void *) & sr_barrier->sense);
        }
#endif
        while ( currSense != sr_barrier_load_sense( & (sr_barrier->sense)) ) {
#ifdef YIELD_SPINNING
            spinning_thread_yield( );
#endif
#ifdef PAUSE_SPINNING
            spinning_pause( );
#endif
#ifdef WFE_SPINNING
            spinning_thread_wfe_wait( );
#endif
        };
    }
    /* Flip the thread-private sense for the next barrier episode. */
    sr_barrier_set_sense( senseP, !currSense);
#ifdef ARCH_STORE_NR_NGO_REFINED
    asm volatile( "lock; addl $0,(%rsp)\n");
#endif
}
#endif /* SR_BARRIER || DSMNH_BARRIER */
#ifdef TREE_BARRIER
# ifdef TRNM_BARRIER
/**
 * Specialized (see assertions) power function
 */
static inline int math_pow( int val, int power)
{
    int res = 1;
    bar_Assert( (power >= 0) && (power <= CEIL_LOG2_THREADS_MAX_NUM));
    while ( power-- ) {
        res *= val;
    }
    return res;
}
# endif /* TRNM_BARRIER */
/**
 * Fill startId/stopId with per-topology-level [lb, hb) iteration bounds:
 * levels above startTplLevel are pinned to puToPlace's own ancestor node,
 * levels from startTplLevel down cover the whole machine.
 */
static void tree_InitBoundsGetActiveNodesHelper( tpl_PU_t * puToPlace, tpl_NodeType_t startTplLevel, int (* startId) [
TPL_NODES_DEFINED_NUM], int (* stopId) [ TPL_NODES_DEFINED_NUM])
{
    tpl_NodeType_t nt;
    int lb, hb;
    for ( nt = TPL_NODE_SOCKET; nt < TPL_NODES_DEFINED_NUM; nt++ ) {
        if ( nt < startTplLevel ) {
            /* Above the start level: restrict to the PU's own ancestor. */
            switch ( nt ) {
            case TPL_NODE_SOCKET: {
                lb = puToPlace->socketId;
                hb = puToPlace->socketId + 1;
            } break;
            case TPL_NODE_NUMANODE: {
                lb = puToPlace->numaNodeId;
                hb = puToPlace->numaNodeId + 1;
            } break;
            case TPL_NODE_CORE: {
                lb = puToPlace->coreId ;
                hb = puToPlace->coreId + 1;
            } break;
            case TPL_NODE_PU:
            default: bar_InternalError( __FILE__, __LINE__);
            }
        } else {
            /* At or below the start level: iterate the whole machine. */
            switch ( nt ) {
            case TPL_NODE_SOCKET: {
                lb = 0;
                hb = machineDescription.summary->socketsPerMachineNum;
            } break;
            case TPL_NODE_NUMANODE: {
                lb = 0;
                hb = machineDescription.summary->numaNodesPerSocketNum;
            } break;
            case TPL_NODE_CORE: {
                lb = 0;
                hb = machineDescription.summary->coresPerNumaNodeNum;
            } break;
            case TPL_NODE_PU: {
                lb = 0;
                hb = machineDescription.summary->pusPerCoreNum;
            } break;
            default: bar_InternalError( __FILE__, __LINE__);
            }
        }
        (*startId) [ nt ] = lb;
        (*stopId) [ nt ] = hb;
    }
}
/**
 * Count the topology nodes at level upToTplLevel that hold at least one
 * active thread, scanning within the bounds implied by fromTplLevel for the
 * PU of the next leaf to place.  Without topology-aware mapping the answer
 * is simply the thread count.
 */
static int tree_GetActiveNodesFromUpToLevel( tree_barrier_t * tree_barrier, tree_build_context_t * tbc, tpl_NodeType_t fromTplLevel, tpl_NodeType_t upToTplLevel)
{
    int so, nu, co, pu;
    tpl_PU_t * puToPlace = bar_ThreadIdToPUMap [ tree_barrier->leavesNum ];
    int startId [ TPL_NODES_DEFINED_NUM ];
    int stopId [ TPL_NODES_DEFINED_NUM ];
    int activeNodesNum = 0;
    if ( TOPOLOGY_AWARE_MAPPING == TRUE ) {
        tree_InitBoundsGetActiveNodesHelper( puToPlace, fromTplLevel, & startId, & stopId);
        for ( so = startId [ TPL_NODE_SOCKET ]; so < stopId [ TPL_NODE_SOCKET ]; so ++ ) {
            if ( upToTplLevel == TPL_NODE_SOCKET ) {
                activeNodesNum += (machineDescription.sockets [ so ].activeThreadsNum > 0) ? 1 : 0;
            }
            for ( nu = startId [ TPL_NODE_NUMANODE ]; nu < stopId [ TPL_NODE_NUMANODE ]; nu ++ ) {
                if ( upToTplLevel == TPL_NODE_NUMANODE ) {
                    activeNodesNum += (machineDescription.sockets [ so ].numaNodes [ nu ].activeThreadsNum > 0) ? 1 : 0;
                }
                for ( co = startId [ TPL_NODE_CORE ]; co < stopId [ TPL_NODE_CORE ]; co ++ ) {
                    if ( upToTplLevel == TPL_NODE_CORE ) {
                        activeNodesNum += (machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ].activeThreadsNum > 0) ? 1 : 0;
                    }
                    for ( pu = startId [ TPL_NODE_PU ]; pu < stopId [ TPL_NODE_PU ]; pu ++ ) {
                        if ( upToTplLevel == TPL_NODE_PU ) {
                            activeNodesNum += machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ].pus [ pu]->activeThreadsNum;
                        }
                    }
                }
            }
        }
    } else {
        activeNodesNum = tree_barrier->threadsNum;
    }
    return activeNodesNum;
}
#ifdef TRNM_STAT_WIN
/** Map a PU to a flat id of its ancestor node at tplLevel (socket or NUMA
 *  node only; other levels violate the precondition and abort). */
static int tree_GetTPLNodeLevelSignatureByPU( tpl_NodeType_t tplLevel, tpl_PU_t * pu)
{
    switch ( tplLevel ) {
    case TPL_NODE_SOCKET: return pu->socketId;
    case TPL_NODE_NUMANODE: return pu->numaNodeId + (pu->socketId * machineDescription.summary->numaNodesPerSocketNum);
    default: /* precondition */ bar_InternalError( __FILE__, __LINE__);
    }
}
/**
 * For a static-winner tournament inode, choose the thread that will write
 * its flags: scan forward from the inode's thread until radix^(levels left)
 * distinct topology-node signatures have been crossed.
 */
static int tree_CalculateSTNInodeThreadWriteId( tree_barrier_t * tree_barrier, tree_build_context_t * tbc)
{
    int threadWriteId = tree_barrier->inodesNum;
    int tplDelta;
    int curDelta = 0;
    int curTplSignature, newTplSignature;
    if ( tbc->curTplLevel > TPL_NODE_NUMANODE ) {
        return threadWriteId;
    }
    tplDelta = math_pow( tree_barrier->radix, (tbc->reachHeight [ tbc->curTplLevel ] - tbc->curHeight [ tbc->curTplLevel ] - 1));
    curTplSignature = tree_GetTPLNodeLevelSignatureByPU( tbc->curTplLevel, bar_ThreadIdToPUMap [ tree_barrier->inodesNum ]);
    for ( threadWriteId = tree_barrier->inodesNum; threadWriteId < tree_barrier->threadsNum; threadWriteId ++ ) {
        newTplSignature = tree_GetTPLNodeLevelSignatureByPU( tbc->curTplLevel, bar_ThreadIdToPUMap [ threadWriteId ]);
        if ( newTplSignature != curTplSignature ) {
            curDelta++;
            curTplSignature = newTplSignature;
        }
        if ( curDelta == tplDelta ) {
            break;
        }
    }
    if ( threadWriteId == tree_barrier->threadsNum) {
        bar_InternalError( __FILE__, __LINE__);
    }
    return threadWriteId;
}
#endif
static int tree_inode_construct( tree_barrier_t * tree_barrier,
tree_build_context_t * tbc, tree_node_t ** child)
{
    int threadAllocId;
    /* Thread expected to WRITE this inode's flags; used to choose the NUMA
       placement of the allocation.  The initializer is selected by the
       preprocessor. */
    int threadWriteId =
#ifdef TRNM_STAT_WIN
    tree_CalculateSTNInodeThreadWriteId( tree_barrier, tbc);
#else
    tree_barrier->inodesNum;
#endif
#ifdef T_GLOBAL_SENSE
    /* The root inode owns the single global sense flag. */
    if ( tbc->parent == NULL ) {
        tree_barrier->sense = bar_MemAlloc( sizeof( bool), threadWriteId, tree_barrier->inodesNum, NULL);
        (* tree_barrier->sense) = FALSE;
    }
#endif
    tree_barrier->inodes [ tree_barrier->inodesNum ] = bar_MemAlloc( sizeof( tree_node_t), tree_barrier->inodesNum, threadWriteId, & threadAllocId);
    *child = tree_barrier->inodes [ tree_barrier->inodesNum ];
#ifdef T_LOCAL_SENSE
    (*child)->sense = bar_MemAlloc( sizeof( bool), threadWriteId, tree_barrier->inodesNum, NULL);
    (* ((*child)->sense)) = FALSE;
#endif
    tree_barrier->inodesNum ++;
#ifdef COMBINED_BARRIER
    (*child)->count = 0;
    (*child)->threadsNum = 0;
#endif
#ifdef TRNM_BARRIER
    (*child)->tier = tbc->curHeight [ tbc->curTplLevel ];
    (*child)->trnmDataCurr.full = TRNM_FALSE;
    (*child)->trnmDataInit [ PARITY_EVEN ].full = TRNM_FALSE;
    (*child)->trnmDataInit [ PARITY_ODD ].full = TRNM_FALSE;
#endif
    (*child)->parent = tbc->parent;
    return threadAllocId;
}
/**
 * Shrink the reachable subtree height when the remaining leaves at the
 * current level can be packed under fewer tiers than planned.  No-op for the
 * first edge of a node.  (curHeight is declared but unused here.)
 */
static inline void tree_AdjustReachHeight( tree_barrier_t * tree_barrier, tree_build_context_t * tbc, int edgeId)
{
    int curHeight, threadsRest, deltaHeight, curHeightEdgesRest;
    if (
#ifdef TRNM_STAT_WIN
    edgeId == TRNM_STAT_WIN_ID
#else
    edgeId == 0
#endif
    ) {
        return;
    }
    threadsRest = tbc->tplLevelLeavesToConstruct [ tbc->curTplLevel ];
#ifdef TRNM_STAT_WIN
    curHeightEdgesRest = (tree_barrier->radix - 1) - edgeId;
#else
    curHeightEdgesRest = tree_barrier->radix - edgeId;
#endif
    bar_Assert( threadsRest > 0);
    deltaHeight = math_logNCeil( tree_barrier->radix, (threadsRest + curHeightEdgesRest - 1) / curHeightEdgesRest) + 1;
    if ( deltaHeight < tbc->reachHeight [ tbc->curTplLevel ] - tbc->curHeight [ tbc->curTplLevel ] ) {
        tbc->reachHeight [ tbc->curTplLevel ] = tbc->curHeight [ tbc->curTplLevel ] + deltaHeight;
    }
}
/** Bookkeeping before recursing down edge edgeId of 'child'. */
static inline void
tree_barrier_action_before_edge_construction( tree_barrier_t * tree_barrier, tree_build_context_t * tbc, int edgeId, tree_node_t * child, int * leavesNumBEC)
{
    /* actions on barrier */
    (*leavesNumBEC) = tree_barrier->leavesNum;
    /* adjust reach height */
    tree_AdjustReachHeight( tree_barrier, tbc, edgeId);
    /* actions on context */
    tbc->parent = child;
    tbc->curHeight [ tbc->curTplLevel ] ++;
    if ( tbc->curHeight [ tbc->curTplLevel ] == tbc->reachHeight [ tbc->curTplLevel ] ) {
        tbc->tplLevelLeavesToConstruct [ tbc->curTplLevel ] --;
    }
}
/** Bookkeeping after returning from edge edgeId of 'child': update counts
 *  and the tournament flags/leaf-to-part map for the new leaves. */
static inline void tree_barrier_action_after_edge_construction( tree_barrier_t * tree_barrier, tree_build_context_t * tbc, int edgeId, tree_node_t * child, int * leavesNumBEC)
{
    /* actions on context */
    tbc->curHeight [ tbc->curTplLevel ]--;
    /* actions on barrier */
#ifdef COMBINED_BARRIER
    child->count++;
    child->threadsNum++;
#endif
#ifdef TRNM_BARRIER
# ifdef TRNM_STAT_WIN
    if ( edgeId != TRNM_STAT_WIN_ID ) {
        child->trnmDataInit [ PARITY_ODD ].part [ edgeId ] = TRNM_TRUE;
    }
# endif
# ifdef TRNM_DYNM_WIN
    child->trnmDataInit [ PARITY_ODD ].part [ edgeId ] = TRNM_TRUE;
# endif
    {
        int i;
        for ( i = (*leavesNumBEC); i < tree_barrier->leavesNum; i++ ) {
            tree_barrier->partIdMap [ i ] [ tbc->curHeight [ tbc->curTplLevel ] ] = edgeId;
        }
    }
#endif
}
/** Hook called when an inode has been constructed (currently nothing). */
static inline void tree_barrier_action_on_inode_construction( tree_build_context_t * tbc)
{
}
/**
 * On entering a tree node at the boundary of a topology level: descend to the
 * next level whose inter-node connections must be avoided, and compute how
 * many extra tiers (reachHeight) its active nodes require.
 */
static void tree_barrier_action_on_node_start( tree_barrier_t * tree_barrier, tree_build_context_t * tbc)
{
    while ( (tbc->curHeight [ tbc->curTplLevel ] == tbc->reachHeight [ tbc->curTplLevel ]) && (tbc->curTplLevel < TPL_NODE_PU) ) {
        int deltaHeight;
        int activeNodesNum;
        tpl_NodeType_t startTplLevel = tbc->curTplLevel + 1;
        tpl_NodeType_t nextTplLevel = startTplLevel;
        tbc->curTplLevel = startTplLevel;
        tbc->curHeight [ tbc->curTplLevel ] = tbc->reachHeight [ tbc->curTplLevel - 1 ];
        /* Skip levels whose inter-node traffic need not be avoided. */
        while ( !machineDescription.summary->avoidInterNodeConnections [ nextTplLevel ] && (nextTplLevel != TPL_NODE_PU) ) {
            nextTplLevel++;
        }
while ( tbc->curTplLevel < nextTplLevel ) {
            /* Intermediate levels contribute no tiers of their own. */
            tbc->reachHeight [ tbc->curTplLevel ] = tbc->curHeight [ tbc->curTplLevel ];
            tbc->tplLevelLeavesToConstruct [ tbc->curTplLevel ] = 0;
            tbc->curTplLevel++;
            tbc->curHeight [ tbc->curTplLevel ] = tbc->reachHeight [ tbc->curTplLevel - 1 ];
        }
        activeNodesNum = tree_GetActiveNodesFromUpToLevel( tree_barrier, tbc, startTplLevel, nextTplLevel);
        deltaHeight = math_logNCeil( tree_barrier->radix, activeNodesNum);
        tbc->reachHeight [ tbc->curTplLevel ] = tbc->curHeight [ tbc->curTplLevel ] + deltaHeight;
        if ( deltaHeight > 0) {
            tbc->tplLevelLeavesToConstruct [ tbc->curTplLevel ] = activeNodesNum;
        } else {
            tbc->tplLevelLeavesToConstruct [ tbc->curTplLevel ] = 0;
        }
    }
    /* special case for experimenting with one thread */
    if ( (tbc->curTplLevel >= TPL_NODE_PU) && (tree_barrier->threadsNum == 1) && (tbc->reachHeight [ tbc->curTplLevel ] == 0) ) {
        tbc->reachHeight [ tbc->curTplLevel ] ++;
        tbc->tplLevelLeavesToConstruct [ tbc->curTplLevel ] = 1;
    }
}
/** On leaving a node, pop back up through topology levels whose height
 *  budget has been fully consumed. */
static inline void tree_barrier_action_on_node_finish( tree_build_context_t * tbc)
{
    if ( TOPOLOGY_AWARE_MAPPING == TRUE ) {
        bar_Assert( (tbc->curTplLevel > TPL_NODE_MACHINE) && (tbc->curTplLevel < TPL_NODE_UNDEFINED));
        while ( tbc->curHeight [ tbc->curTplLevel ] == tbc->curHeight [ tbc->curTplLevel - 1 ] ) {
            tbc->curTplLevel--;
            if ( tbc->curTplLevel == TPL_NODE_MACHINE ) {
                bar_Assert( tbc->curHeight [ TPL_NODE_MACHINE ] == 0);
                return;
            }
        }
    }
}
/** A leaf is due exactly when the current height reached the planned one. */
static bool tree_barrier_is_leaf_to_be_constructed( tree_build_context_t * tbc)
{
    return (tbc->curHeight [ tbc->curTplLevel ] == tbc->reachHeight [ tbc->curTplLevel ]);
}
/**
 * Recursively build the barrier tree: at a leaf position record tbc->parent
 * as the leaf's entry inode; otherwise construct an inode and recurse over
 * its (radix, or radix-1 for static-winner tournaments) edges while leaves
 * remain to be placed at the current topology level.
 */
static void tree_barrier_build_tree( tree_barrier_t * tree_barrier, tree_build_context_t * tbc)
{
    int i, leavesNumBEC;
    tree_node_t * child;
    tree_barrier_action_on_node_start( tree_barrier, tbc);
    if ( tree_barrier_is_leaf_to_be_constructed( tbc) ) {
        if ( tree_barrier->leavesNum < tree_barrier->threadsNum ) {
            tree_barrier->leaves [ tree_barrier->leavesNum++ ] = tbc->parent;
        }
    } else {
        /* NOTE(review): threadId is assigned but never used afterwards. */
        int threadAllocId, threadId = tree_barrier->inodesNum;
        threadAllocId = tree_inode_construct( tree_barrier, tbc, & child);
        tree_barrier_action_on_inode_construction( tbc);
        for (
# ifdef TRNM_STAT_WIN
        i = TRNM_STAT_WIN_ID; i < tree_barrier->radix - 1;
# else
        i = 0; i < tree_barrier->radix;
# endif
        i++ ) {
            if ( tbc->tplLevelLeavesToConstruct [ tbc->curTplLevel ] ) {
                tbc->parentEdgeId = i;
                tree_barrier_action_before_edge_construction( tree_barrier, tbc, i, child, &leavesNumBEC);
                tree_barrier_build_tree( tree_barrier, tbc);
                tree_barrier_action_after_edge_construction( tree_barrier, tbc, i, child, &leavesNumBEC);
            }
        }
    }
    tree_barrier_action_on_node_finish( tbc);
}
/** Seed the build context: a flat logN-height plan without topology-aware
 *  mapping, or a machine-rooted plan with it. */
static void tree_InitBuildContext( tree_barrier_t * tree_barrier, tree_build_context_t * tbc, unsigned radix, unsigned barrierCount)
{
    tbc->parent = NULL;
    if ( TOPOLOGY_AWARE_MAPPING == FALSE ) {
        int height = 1;
        if ( barrierCount > 1 ) {
            height = math_logNCeil( radix, barrierCount);
        }
        tbc->curTplLevel = TPL_NODE_UNDEFINED;
        tbc->curHeight [ TPL_NODE_UNDEFINED ] = 0;
        tbc->reachHeight [ TPL_NODE_UNDEFINED ] = height;
        tbc->tplLevelLeavesToConstruct [ TPL_NODE_UNDEFINED ] = barrierCount;
    } else {
        tbc->curTplLevel = TPL_NODE_MACHINE;
        tbc->curHeight [ TPL_NODE_MACHINE ] = 0;
        tbc->reachHeight [ TPL_NODE_MACHINE ] = 0;
        tbc->tplLevelLeavesToConstruct [ TPL_NODE_MACHINE ] = 0;
    }
    tbc->root = tree_barrier->inodes [ 0 ];
}
/**
 * Initialize a tree barrier for barrier_count threads with the given radix:
 * assign leaf ids to each participant's TLS record and build the tree.
 */
static void tree_barrier_init( tree_barrier_t * tree_barrier, tls_DataSet_t * tlsDataSet, int barrier_count, int radix, int barrierId, int threadBaseId)
{
    tree_build_context_t tbc;
    int nodes_count = 0;
    int i = 0;
    bar_Assert( barrier_count > 0 && radix > 1);
    tree_barrier->radix = radix;
    tree_barrier->leavesNum = 0;
    tree_barrier->inodesNum = 0;
    tree_barrier->threadsNum = barrier_count;
    tree_barrier->barrierId = barrierId;
    for ( i = 0; i < barrier_count; i++ ) {
        tlsDataSet->tlsData [ threadBaseId + i ]->leafId = i;
    }
    tree_InitBuildContext( tree_barrier, & tbc, radix, barrier_count);
    tree_barrier_build_tree(
tree_barrier, & tbc); }
/* Store/load helpers for the tree barrier: non-temporal / non-cached
   variants are substituted when the architecture provides them. */
static inline void tree_barrier_set_sense( volatile bool * addr, bool sense)
{
#ifdef ARCH_STORE_NR_NGO
    store_nr_ngo_bool( (void *) addr, sense);
#else
# ifdef ARCH_STORE_NR
    store_nr_bool( (void *) addr, sense);
# else
    (* addr) = sense;
# endif
#endif
}
static inline void tree_barrier_set_count( volatile int * addr, int count)
{
#ifdef ARCH_STORE_NR_NGO
    store_nr_ngo_int( (void *) addr, count);
#else
# ifdef ARCH_STORE_NR
    store_nr_int( (void *) addr, count);
# else
    (* addr) = count;
# endif
#endif
}
static inline void tree_barrier_set_full( int_max_ma_vol_t * addr, int_max_ma_t full)
{
#ifdef ARCH_STORE_NR_NGO
    store_nr_ngo_int_max_ma( (void *) addr, full);
#else
# ifdef ARCH_STORE_NR
    store_nr_int_max_ma( (void *) addr, full);
# else
    (* addr) = full;
# endif
#endif
}
static inline int_max_ma_t tree_barrier_load_full( int_max_ma_vol_t * addr)
{
#ifdef ARCH_LOAD_NC
    return load_nc_int_max_ma( (void *) addr);
#else
    return (* addr);
#endif
}
static inline bool tree_barrier_load_sense( volatile bool * addr)
{
#ifdef ARCH_LOAD_NC
    return load_nc_bool( (void *) addr);
#else
    return (* addr);
#endif
}
/**
 * Wait at 'node' of a tree barrier.  Depending on the configuration a thread
 * either decrements the node's counter (COMBINED_BARRIER) or sets its
 * tournament part flag (TRNM_BARRIER); the node's winner/last thread
 * propagates to the parent recursively, then releases the losers by flipping
 * the global or per-node sense flag.  Each thread flips its TLS sense on exit.
 */
static inline void tree_barrier_wait( tree_barrier_t * tree_barrier, tls_Data_t * tlsData, tree_node_t * node)
{
    int * senseP = & (tlsData->sense[ tree_barrier->barrierId ].data);
#ifdef COMBINED_BARRIER
    int currCount = fetch_and_add( & (node->count), -1);
#endif
    int currSense = *senseP;
#ifdef TRNM_BARRIER
    int partId = tree_barrier->partIdMap [ tlsData->leafId ] [ node->tier ];
    int_min_ma_vol_t * partP = & (node->trnmDataCurr.part [ partId ]);
    int_max_ma_vol_t * fullP = & (node->trnmDataCurr.full);
# ifdef TRNM_STAT_WIN
    bool isWinner = (partId == TRNM_STAT_WIN_ID);
    if ( !isWinner ) {
        (*partP) = currSense;
    }
# endif
# ifdef TRNM_DYNM_WIN
    (*partP) = currSense;
#ifdef WFE_SPINNING
    spinning_thread_wfe_send( );
#endif
    /* In case when Intra-Processor Forwarding Is Allowed (as in X86 ISA 8.2.3.5)
       the load above and store below in if statement may lead to all
       threads going to busy-waiting. */
#ifdef INTRA_PROCESSOR_FORWARDING_ALLOWED
    memory_barrier( );
#endif
# endif
#endif
    if (
#ifdef COMBINED_BARRIER
    currCount == 1
#endif
#ifdef TRNM_BARRIER
# ifdef TRNM_STAT_WIN
    isWinner == TRUE
# endif
# ifdef TRNM_DYNM_WIN
    tree_barrier_load_full( fullP) == node->trnmDataInit [ currSense ].full
# endif
#endif
    ) {
        /* Winner / last arrival path. */
#ifdef TRNM_BARRIER
# ifdef TRNM_STAT_WIN
        /* The static winner still has to wait for all loser parts. */
# ifdef WFE_SPINNING
        if ( tree_barrier_load_full( fullP) != node->trnmDataInit [ currSense ].full ) {
            spinning_thread_wfe_init( (void *) fullP);
        }
# endif
        while ( tree_barrier_load_full( fullP) != node->trnmDataInit [ currSense ].full ) {
# ifdef YIELD_SPINNING
            spinning_thread_yield( );
# endif
# ifdef PAUSE_SPINNING
            spinning_pause( );
# endif
# ifdef WFE_SPINNING
            spinning_thread_wfe_wait( );
# endif
        }
# endif
#endif /* TRNM_BARRIER */
#ifdef T_GLOBAL_SENSE
# ifdef COMBINED_BARRIER
        tree_barrier_set_count( & (node->count), node->threadsNum);
# endif
#endif
        /* Propagate the arrival up the tree before releasing this node. */
        if ( node->parent ) {
            tree_barrier_wait( tree_barrier, tlsData, node->parent);
        }
#ifdef T_GLOBAL_SENSE
# ifdef COMBINED_BARRIER
        asm volatile("": : :"memory");
# ifdef ARCH_STORE_NR_NGO_REFINED
        asm volatile( "lock; addl $0,(%rsp)\n");
# endif
# endif
        tree_barrier_set_sense( tree_barrier->sense, currSense);
#endif
#ifdef T_LOCAL_SENSE
# ifdef COMBINED_BARRIER
        tree_barrier_set_count( & (node->count), node->threadsNum);
        asm volatile("": : :"memory");
# ifdef ARCH_STORE_NR_NGO_REFINED
        asm volatile( "lock; addl $0,(%rsp)\n");
# endif
# endif
        tree_barrier_set_sense( node->sense, currSense);
#endif
#ifdef WFE_SPINNING
        spinning_thread_wfe_send( );
#endif
    } else {
        /* Loser path: spin until the winner flips the sense. */
#ifdef T_GLOBAL_SENSE
# ifdef WFE_SPINNING
        if ( currSense != tree_barrier_load_sense( tree_barrier->sense) ) {
            spinning_thread_wfe_init( (void *) tree_barrier->sense);
        }
# endif
        while ( currSense != tree_barrier_load_sense( tree_barrier->sense) )
#endif
#ifdef T_LOCAL_SENSE
# ifdef WFE_SPINNING
        if ( currSense != tree_barrier_load_sense( node->sense) ) {
            spinning_thread_wfe_init( (void *) node->sense);
        }
# endif
        while ( currSense != tree_barrier_load_sense( node->sense) )
#endif
        {
#ifdef YIELD_SPINNING
            spinning_thread_yield( );
#endif
#ifdef PAUSE_SPINNING
            spinning_pause( );
#endif
#ifdef WFE_SPINNING
            spinning_thread_wfe_wait( );
#endif
        };
    }
    tree_barrier_set_sense( senseP, !currSense);
#ifdef ARCH_STORE_NR_NGO_REFINED
    asm volatile( "lock; addl $0,(%rsp)\n");
#endif
}
#endif /* TREE_BARRIER */
#if defined( DSMN_BARRIER) || defined( DSMNH_BARRIER)
/**
 * Initialize a dissemination barrier for threadsNum threads: in round l each
 * thread id signals partner (id + 2^l) mod threadsNum.  Flags are allocated
 * in the arena with reader/writer placement hints, double-buffered by parity
 * to separate consecutive barrier episodes; the second pass wires each
 * thread's partner_flags pointers to the partners' my_flags.
 */
static void dsmn_barrier_init( dsmn_barrier_t * dsmn_barrier, tls_DataSet_t * dsmn_barrier_tls_data_set, int threadsNum, int barrierId)
{
    int id;
    int l;
    int ceilLog2ThreadsNum = (threadsNum > 1) ? math_log2Ceil( threadsNum) : 1;
    dsmn_barrier->barrierId = barrierId;
    dsmn_barrier->ceilLog2ThreadsNum = ceilLog2ThreadsNum;
    for ( id = 0; id < threadsNum; id++ ) {
        for ( l = 0; l < ceilLog2ThreadsNum; l++ ) {
            int threadAllocId;
            parity_t p;
            int partnerId = (id + (1 << l)) % threadsNum;
            for ( p = PARITY_EVEN; p < PARITY_NUM; p++ ) {
                dsmn_barrier_tls_data_set->tlsData [ id ]->my_flags [ p ] [ l ] [ barrierId ] = bar_MemAlloc( sizeof( tls_Sense_t), id, partnerId, & threadAllocId);
                dsmn_barrier_tls_data_set->tlsData [ id ]->my_flags [ p ] [ l ] [ barrierId ]->data = FALSE;
            }
        }
        dsmn_barrier_tls_data_set->tlsData [ id ]->parity [ barrierId ].data = PARITY_EVEN;
        dsmn_barrier_tls_data_set->tlsData [ id ]->sense [ barrierId ].data = TRUE;
    }
    for ( id = 0; id < threadsNum; id++ ) {
        for ( l = 0; l < ceilLog2ThreadsNum; l++ ) {
            parity_t p;
            int partnerId = (id + (1 << l)) % threadsNum;
            for ( p = PARITY_EVEN; p < PARITY_NUM; p++ ) {
                dsmn_barrier_tls_data_set->tlsData [ id ]->partner_flags [ p ] [ l ] [ barrierId ] = dsmn_barrier_tls_data_set->tlsData [ partnerId ]->my_flags [ p ] [ l ] [ barrierId ];
            }
        }
    }
}
/** Store a flag value, via non-temporal stores when available. */
static inline void dsmn_barrier_store_bool( volatile bool * addr, bool data)
{
#ifdef ARCH_STORE_NR_NGO
    store_nr_ngo_bool( (void *) addr, data);
#else
# ifdef ARCH_STORE_NR
    store_nr_bool( (void *) addr, data);
# else
(* addr) = data; # endif #endif } static inline bool dsmn_barrier_load_bool( volatile bool * addr) { #ifdef ARCH_LOAD_NC return load_nc_bool( (void *) addr); #else return (* addr); #endif } static inline void dsmn_barrier_store_sense( volatile bool * sense_addr, bool sense) { dsmn_barrier_store_bool( sense_addr, sense); } static inline void dsmn_barrier_set_tls_sense( volatile bool * sense_addr, bool sense) { dsmn_barrier_store_bool( sense_addr, sense); } static inline void dsmn_barrier_set_tls_parity( volatile bool * sense_addr, bool parity ) { dsmn_barrier_store_bool( sense_addr, parity); } static inline bool dsmn_barrier_load_sense( volatile bool * sense_addr) { return dsmn_barrier_load_bool( sense_addr); } static inline void dsmn_barrier_wait( dsmn_barrier_t * dsmn_barrier, tls_Data_t * dsmn_barrier_tls_data) { int l = 0; int barrierId = dsmn_barrier->barrierId; int ceilLog2ThreadsNum = dsmn_barrier->ceilLog2ThreadsNum; bool s = dsmn_barrier_tls_data->sense [ barrierId ].data; parity_t p = dsmn_barrier_tls_data->parity [ barrierId ].data; for ( l = 0; l < ceilLog2ThreadsNum; l++ ) { dsmn_barrier_store_sense( & (dsmn_barrier_tls_data->partner_flags [ p ] [ l ] [ barrierId ]->data), s); #ifdef WFE_SPINNING spinning_thread_wfe_send( ); #endif #ifdef WFE_SPINNING if ( dsmn_barrier_load_sense( & (dsmn_barrier_tls_data->my_flags [ p ] [ l ] [ barrierId ]->data)) != s ) { spinning_thread_wfe_init( (void *) & (dsmn_barrier_tls_data->my_flags [ p ] [ l ] [ barrierId ]->data)); } #endif while ( dsmn_barrier_load_sense( & (dsmn_barrier_tls_data->my_flags [ p ] [ l ] [ barrierId ]->data)) != s ) { #ifdef YIELD_SPINNING spinning_thread_yield( ); #endif #ifdef PAUSE_SPINNING spinning_pause( ); #endif #ifdef WFE_SPINNING spinning_thread_wfe_wait( ); #endif } } if ( p == PARITY_ODD ) { dsmn_barrier_set_tls_sense( & (dsmn_barrier_tls_data->sense [ barrierId ].data), !s); } dsmn_barrier_set_tls_parity( & (dsmn_barrier_tls_data->parity [ barrierId ].data), PARITY_ODD - p); #if 
defined( DSMN_BARRIER) && defined( ARCH_STORE_NR_NGO_REFINED) asm volatile( "lock; addl $0,(%rsp)\n"); #endif } #endif /* DSMN_BARRIER || DSMNH_BARRIER */ #ifdef DSMNH_BARRIER static void dsmnh_barrier_init( tls_DataSet_t * tlsDataSet, int threadsNum, int barrierId) { if ( machineDescription.machineId != UNDEFINED_MACHINE_ID ) { int so, nu, co; tpl_NodeType_t innermostAvoidConnectionsTplLevel; int activeDsmnThreadsNum = 0, threadId = 0; for ( innermostAvoidConnectionsTplLevel = TPL_NODE_SOCKET; innermostAvoidConnectionsTplLevel < TPL_NODES_DEFINED_NUM; innermostAvoidConnectionsTplLevel ++ ) { if ( !machineDescription.summary->avoidInterNodeConnections [ innermostAvoidConnectionsTplLevel ] ) break; } innermostAvoidConnectionsTplLevel--; if ((innermostAvoidConnectionsTplLevel != TPL_NODE_NUMANODE) && (innermostAvoidConnectionsTplLevel != TPL_NODE_CORE)) { /* FIXME: Not implemented */ bar_InternalError( __FILE__, __LINE__); } for ( so = 0; so < machineDescription.summary->socketsPerMachineNum; so++ ) { for ( nu = 0; nu < machineDescription.summary->numaNodesPerSocketNum; nu++ ) { if (innermostAvoidConnectionsTplLevel == TPL_NODE_NUMANODE) { int activeThreadsNum = machineDescription.sockets [ so ].numaNodes [ nu ].activeThreadsNum; if ( activeThreadsNum > 0 ) { int k; sr_barrier_init( & bar_srBarrier [ barrierId ][ activeDsmnThreadsNum ], NULL, activeThreadsNum, barrierId); for ( k = 0; k < activeThreadsNum; k++ ) { tlsDataSet->tlsData [ threadId + k ]->dsmnTlsData = tlsDataSet->tlsData [ activeDsmnThreadsNum ]; tlsDataSet->tlsData [ threadId + k ]->groupId = activeDsmnThreadsNum; } activeDsmnThreadsNum ++; threadId += activeThreadsNum; } } else { bar_Assert( innermostAvoidConnectionsTplLevel == TPL_NODE_CORE); for ( co = 0; co < machineDescription.summary->coresPerNumaNodeNum; co++ ) { int activeThreadsNum = machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ].activeThreadsNum; if ( activeThreadsNum > 0 ) { int k; sr_barrier_init( & bar_srBarrier [ 
barrierId ][ activeDsmnThreadsNum ], NULL, activeThreadsNum, barrierId); for ( k = 0; k < activeThreadsNum; k++ ) { tlsDataSet->tlsData [ threadId + k ]->dsmnTlsData = tlsDataSet->tlsData [ activeDsmnThreadsNum ]; tlsDataSet->tlsData [ threadId + k ]->groupId = activeDsmnThreadsNum; } activeDsmnThreadsNum ++; threadId += activeThreadsNum; } } } } } dsmn_barrier_init( & bar_dsmnBarrier [ barrierId ], tlsDataSet, activeDsmnThreadsNum, barrierId); } else { int i; for ( i = 0; i < threadsNum; i++) { sr_barrier_init( & bar_srBarrier [ barrierId ][ i ], NULL, 1, barrierId); tlsDataSet->tlsData [ i ]->dsmnTlsData = tlsDataSet->tlsData [ i ]; tlsDataSet->tlsData [ i ]->groupId = i; } dsmn_barrier_init( & bar_dsmnBarrier [ barrierId ], tlsDataSet, threadsNum, barrierId); } } #endif /* DSMNH_BARRIER */ static inline void bar_BarrierTlsDataInit( int barrierId, tls_Data_t * tlsData) { #ifdef SR_BARRIER tlsData->sense [ barrierId ].data = !bar_srBarrier [ barrierId ].sense; #endif #ifdef TREE_BARRIER # ifdef T_GLOBAL_SENSE tlsData->sense [ barrierId ].data = !(* bar_treeBarrier [ barrierId ].sense); # endif # ifdef T_LOCAL_SENSE tlsData->sense [ barrierId ].data = !(* bar_treeBarrier [ barrierId ].inodes [ 0 ]->sense); # endif #endif #ifdef DSMNH_BARRIER tlsData->senseSR [ barrierId ][ tlsData->groupId ].data = !bar_srBarrier [ barrierId ][ tlsData->groupId ].sense; #endif } static inline void bar_BarrierWait( int barrierId, tls_Data_t * tlsData) { exp_Stage_t expStage = tlsData->expInfo->expStage; if ( expStage == EXP_STAGE_REF ) return; #ifdef OMP_BARRIER #pragma omp barrier #endif #ifdef PTHREAD_BARRIER pthread_barrier_wait( & bar_pthreadBarrier [ barrierId ]); #endif #ifdef SR_BARRIER sr_barrier_wait( & bar_srBarrier [ barrierId ], tlsData); #endif #ifdef TREE_BARRIER tree_barrier_wait( & bar_treeBarrier [ barrierId ], tlsData, bar_treeBarrier [ barrierId ].leaves [ tlsData->leafId ]); #endif #ifdef DSMN_BARRIER dsmn_barrier_wait( & bar_dsmnBarrier [ barrierId ], tlsData); 
#endif #ifdef DSMNH_BARRIER sr_barrier_wait( & bar_srBarrier [ barrierId ][ tlsData->groupId ], tlsData); #endif } static void bar_StartTimer( tls_Data_t * tlsData) { if ( tlsData->threadId != 0 ) return; exp_Timer_t * timer = & tlsData->expInfo->timer [ tlsData->expInfo->expStage ]; while ( clock_gettime( timer->clockId, & timer->startTime) ) { ; } } static void bar_StopTimer( tls_Data_t * tlsData) { if ( tlsData->threadId != 0 ) return; exp_Timer_t * timer = & tlsData->expInfo->timer [ tlsData->expInfo->expStage ]; while ( clock_gettime( timer->clockId, & timer->stopTime) ) { ; } timer->deltaTime = ((long long int)(timer->stopTime.tv_sec - timer->startTime.tv_sec)) * NANOSEC_IN_SEC + (long long int)(timer->stopTime.tv_nsec - timer->startTime.tv_nsec); } #ifdef TMPL_BENCHMARK static void * test_barrier_tmpl( tls_Data_t * tlsData) { bar_BarrierTlsDataInit( 0, tlsData); bar_BarrierWait( 0, tlsData); bar_StartTimer( tlsData); bar_StopTimer( tlsData); return NULL; } #endif /* TMPL_BENCHMARK */ #ifdef PURE_BENCHMARK /* 'volatile' is used to prevent compiler from elimination of the loop */ static inline void #define DUMMY_LOOP_ITERATIONS 16 test_Delay( ) { volatile int i; for ( i = 0; i < DUMMY_LOOP_ITERATIONS; i++ ) { ; } } #endif /* PURE_BENCHMARK */ #ifdef LDIMBL_BENCHMARK #define IMBALANCE_FACTOR 25 /* 'volatile' is used to prevent compiler from elimination of the loop */ static inline void test_LoadImbalance( volatile int threadId) { volatile int i; for ( i = 0; i < IMBALANCE_FACTOR * threadId; i++ ) { ; } } #endif /* LDIMBL_BENCHMARK */ #if defined( PURE_BENCHMARK) || defined( LDIMBL_BENCHMARK) static void * test_barrier_pure( tls_Data_t * tlsData) { int loBarNum = tlsData->expInfo->loBarNum; int hiBarNum = tlsData->expInfo->hiBarNum; #ifdef LDIMBL_BENCHMARK volatile int i = tlsData->threadId; #endif int j; bar_BarrierTlsDataInit( 0, tlsData); bar_BarrierWait( 0, tlsData); bar_StartTimer( tlsData); for ( j = loBarNum; j <= hiBarNum; j = j + 1 ) { #ifdef 
PURE_BENCHMARK test_Delay( ); #endif #ifdef LDIMBL_BENCHMARK test_LoadImbalance( i); #endif bar_BarrierWait( 0, tlsData); } bar_StopTimer( tlsData); return NULL; } #endif /* PURE_BENCHMARK */ #ifdef NBODY_BENCHMARK static void bar_InitNbody( exp_Info_t * expInfo) { int i; int particlesNum = expInfo->curThreadsNum; for ( i = 0; i < particlesNum; i ++ ) { bar_ParticlesBuf [ i ].x = ((float) rand( ) / (float)(RAND_MAX)) * 2.0 - 1.0; bar_ParticlesBuf [ i ].y = ((float) rand( ) / (float)(RAND_MAX)) * 2.0 - 1.0; bar_ParticlesBuf [ i ].z = ((float) rand( ) / (float)(RAND_MAX)) * 2.0 - 1.0; bar_ParticlesBuf [ i ].Vx = ((float) rand( ) / (float)(RAND_MAX)) * 2.0 - 1.0; bar_ParticlesBuf [ i ].Vy = ((float) rand( ) / (float)(RAND_MAX)) * 2.0 - 1.0; bar_ParticlesBuf [ i ].Vz = ((float) rand( ) / (float)(RAND_MAX)) * 2.0 - 1.0; } } static void * test_barrier_nbody( tls_Data_t * tlsData) { int loBarNum = tlsData->expInfo->loBarNum; int hiBarNum = tlsData->expInfo->hiBarNum; int threadsNum = tlsData->expInfo->curThreadsNum; int deltaBarNum = 2; const float dt = 0.01f; int i = tlsData->threadId; int j; int k; bar_BarrierTlsDataInit( 0, tlsData); bar_BarrierTlsDataInit( 1, tlsData); bar_BarrierWait( 0, tlsData); bar_StartTimer( tlsData); for ( j = 0; j <= (hiBarNum - loBarNum); j = j + deltaBarNum ) { float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; for ( k = 0; k < threadsNum; k ++ ) { if ( k != i ) { const float dx = bar_ParticlesBuf [ k ].x - bar_ParticlesBuf [ i ].x; const float dy = bar_ParticlesBuf [ k ].y - bar_ParticlesBuf [ i ].y; const float dz = bar_ParticlesBuf [ k ].z - bar_ParticlesBuf [ i ].z; const float dr_pow_plus_2 = dx * dx + dy * dy + dz * dz; const float dr_pow_min_3_2 = NBODY_CONST / ( dr_pow_plus_2 * sqrtf( dr_pow_plus_2)); Fx += dx * dr_pow_min_3_2; Fy += dy * dr_pow_min_3_2; Fz += dz * dr_pow_min_3_2; } } bar_ParticlesBuf [ i ].Vx += dt * Fx; bar_ParticlesBuf [ i ].Vy += dt * Fy; bar_ParticlesBuf [ i ].Vz += dt * Fz; bar_BarrierWait( 0, tlsData); 
bar_ParticlesBuf [ i ].x += bar_ParticlesBuf [ i ].Vx * dt; bar_ParticlesBuf [ i ].y += bar_ParticlesBuf [ i ].Vy * dt; bar_ParticlesBuf [ i ].z += bar_ParticlesBuf [ i ].Vz * dt; bar_BarrierWait( 1, tlsData); } bar_StopTimer( tlsData); return NULL; } #endif /* NBODY_BENCHMARK */ #ifdef SANITY_BENCHMARK static void bar_InitTestArray( exp_Info_t * expInfo) { int i; int threadsNum = expInfo->curThreadsNum; for ( i = 0; i < threadsNum; i ++) { bar_TestArray [ i ] = bar_MemAlloc( sizeof( int), i, i, NULL); } } static void * test_barrier_sanity( tls_Data_t * tlsData) { exp_Stage_t expStage = tlsData->expInfo->expStage; int loBarNum = tlsData->expInfo->loBarNum; int hiBarNum = tlsData->expInfo->hiBarNum; int threadsNum = tlsData->expInfo->curThreadsNum; int deltaBarNum = threadsNum * 2 + 1; int i = tlsData->threadId; int j; int k; bar_BarrierTlsDataInit( 0, tlsData); bar_BarrierTlsDataInit( 1, tlsData); bar_BarrierTlsDataInit( 2, tlsData); bar_BarrierWait( 0, tlsData); bar_StartTimer( tlsData); for ( j = 0; j <= (hiBarNum - loBarNum); j = j + deltaBarNum ) { (* bar_TestArray [ i ]) = 0; for ( k = 0; k < threadsNum; k++ ) { int t; bar_BarrierWait( 0, tlsData); t = (* bar_TestArray [ (i + j + k) % threadsNum ]); bar_BarrierWait( 1, tlsData); #ifndef NDEBUG printf( " [%d] -> [%d] val: %d\n", i, (i + j + k) % threadsNum, t); #endif (* bar_TestArray [ i ]) = t + 1; } bar_BarrierWait( 2, tlsData); #ifndef NDEBUG printf( "res id: %d val: %d\n", i, (* bar_TestArray [ i ])); #endif if ( (* bar_TestArray [ i ]) != threadsNum ) { if ( tlsData->expInfo->expStage == EXP_STAGE_EXP ) { bar_InternalError( __FILE__, __LINE__); } } } bar_StopTimer( tlsData); return NULL; } #endif /* SANITY_BENCHMARK */ static inline void bar_BarriersInit( exp_Info_t * expInfo, tls_DataSet_t * tlsDataSet) { int j; #if defined( TREE_BARRIER) int radix = expInfo->curRadixNum; #endif int threadsNum = expInfo->curThreadsNum; for ( j = 0; j < BARRIERS_MAX_NUM; j++ ) { #ifdef PTHREAD_BARRIER 
pthread_barrier_init( & bar_pthreadBarrier [ j ], NULL, threadsNum); #endif #ifdef SR_BARRIER sr_barrier_init( & bar_srBarrier [ j ], NULL, threadsNum, j); #endif #ifdef TREE_BARRIER tree_barrier_init( & bar_treeBarrier [ j ], tlsDataSet, threadsNum, radix, j, 0); #endif #ifdef DSMN_BARRIER dsmn_barrier_init( & bar_dsmnBarrier [ j ], tlsDataSet, threadsNum, j); #endif #ifdef DSMNH_BARRIER dsmnh_barrier_init( tlsDataSet, threadsNum, j); #endif } } static inline void bar_InitMachineActivity( ) { int so, nu, co, pu; machineDescription.activeThreadsNum = 0; for ( so = 0; so < machineDescription.summary->socketsPerMachineNum; so ++ ) { machineDescription.sockets [ so ].activeThreadsNum = 0; for ( nu = 0; nu < machineDescription.summary->numaNodesPerSocketNum; nu ++ ) { machineDescription.sockets [ so ].numaNodes [ nu ].activeThreadsNum = 0; machineDescription.sockets [ so ].numaNodes [ nu ].allocatedInodesNum = 0; for ( co = 0; co < machineDescription.summary->coresPerNumaNodeNum; co ++ ) { machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ].activeThreadsNum = 0; for ( pu = 0; pu < machineDescription.summary->pusPerCoreNum; pu ++ ) { machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ].pus [ pu ]->activeThreadsNum = 0; machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ].pus [ pu ]->firstThreadData = 0; machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ].pus [ pu ]->lastThreadData = 0; } } } } } static inline void bar_IncrementMachineActivity( unsigned so, unsigned nu, unsigned co, unsigned pu, unsigned activeThreadsNum) { machineDescription.activeThreadsNum += activeThreadsNum; machineDescription.sockets [ so ].activeThreadsNum += activeThreadsNum; machineDescription.sockets [ so ].numaNodes [ nu ].activeThreadsNum += activeThreadsNum; machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ].activeThreadsNum += activeThreadsNum; machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ].pus [ pu 
]->activeThreadsNum += activeThreadsNum;
}

/* Append 'addedData' to the singly linked list of per-thread TLS records
   anchored at PU 'pu' (firstThreadData/lastThreadData, linked through
   nextThreadData). */
static void
tpl_AddThreadDataToPU( tls_Data_t * addedData, tpl_PU_t * pu)
{
  if ( pu->lastThreadData == NULL ) {
    pu->firstThreadData = addedData;
    pu->lastThreadData = addedData;
  } else {
    pu->lastThreadData->nextThreadData = addedData;
    pu->lastThreadData = addedData;
  }
}

/* Tear-down counterpart of bar_TlsDataInit.  Intentionally empty: TLS
   allocations are recycled via bar_MemReuse() at the start of each
   experiment (see bar_TestBarrier), so nothing is freed here. */
static inline void
bar_TlsDataFini( void)
{
}

/* Allocate the TLS record for 'threadId' (via the benchmark's bar_MemAlloc
   allocator) and remember the PU the thread will run on. */
static inline void
bar_TlsDataAlloc( tls_DataSet_t * tlsDataSet, int threadId, tpl_PU_t * curPU)
{
  tlsDataSet->tlsData [ threadId ] = (tls_Data_t *) bar_MemAlloc( sizeof( tls_Data_t), threadId, threadId, NULL);
  tlsDataSet->tlsData [ threadId ]->curPU = curPU;
}

/* Create and register a fresh TLS record: record the threadId -> PU mapping,
   allocate the record, fill its identity fields, and (when a PU is known,
   i.e. the machine topology is defined) link it into that PU's thread
   list. */
static inline void
bar_TlsDataNew( exp_Info_t * expInfo, tls_DataSet_t * tlsDataSet, int threadId, tpl_PU_t * curPU)
{
  bar_ThreadIdToPUMap [ threadId ] = curPU;
  bar_TlsDataAlloc( tlsDataSet, threadId, curPU);
  tlsDataSet->tlsData [ threadId ]->threadId = threadId;
  tlsDataSet->tlsData [ threadId ]->expInfo = expInfo;
  tlsDataSet->tlsData [ threadId ]->nextThreadData = NULL;
  if ( curPU != NULL ) {
    tpl_AddThreadDataToPU( tlsDataSet->tlsData [ threadId ], curPU);
  }
}

/* Build the TLS records for all threads of the current experiment and map
   each thread to a PU — either from the user-defined online cpu set or by
   the automatic topology-aware mapping below. */
static inline void
bar_TlsDataInit( exp_Info_t * expInfo, tls_DataSet_t * tlsDataSet)
{
  int i, so, nu, co, pu;
  int puMax, coresNum, pusNum, pusPerCoreNum, coresPassed;
  int threadsNum = expInfo->curThreadsNum;
  unsigned machineId = machineDescription.machineId;
  unsigned osIdActivity [ PUS_PER_MACHINE_MAX_NUM ];

  /* Unknown machine: no topology information, leave every thread unpinned. */
  if ( machineId == UNDEFINED_MACHINE_ID ) {
    for ( i = 0; i < threadsNum; i++ ) {
      bar_TlsDataNew( expInfo, tlsDataSet, i, NULL);
    }
    return;
  }
  bar_InitMachineActivity( );
  for ( i = 0; i < PUS_PER_MACHINE_MAX_NUM; i++ ) {
    osIdActivity [ i ] = 0;
  }
  if ( USER_DEFINED_ACTIVE_PUS_SELECTION == TRUE ) {
    int j;
    int threadId = 0;
    int threadsNum = expInfo->curThreadsNum;
    cpu_set_t onlineCpuSet;

    CPU_ZERO( &onlineCpuSet);
    sys_SetOnlineCpuSet( &onlineCpuSet);
    /* Pick online cpus in priority-interleaved order (stride
       CPU_MAP_PRIORITY_DELTA) until every thread has a PU. */
    while( threadId < threadsNum ) {
      for ( j = 0; j < CPU_MAP_PRIORITY_DELTA; j++ ) {
        for ( i = j; (i < sizeof( cpu_set_t) * BITS_IN_BYTE) && threadId <
threadsNum; i = i + CPU_MAP_PRIORITY_DELTA ) { tpl_PU_t * curPU; if ( !CPU_ISSET( i, &onlineCpuSet) ) continue; curPU = bar_OsIdToPUMap [ i ]; bar_IncrementMachineActivity( curPU->socketId, curPU->numaNodeId, curPU->coreId, curPU->puId, 1); if ( TOPOLOGY_AWARE_MAPPING == TRUE ) { osIdActivity [ i ] ++; } else { bar_TlsDataNew( expInfo, tlsDataSet, threadId, curPU); } threadId ++; } } } threadId = 0; if ( TOPOLOGY_AWARE_MAPPING == TRUE ) { for ( so = 0; so < machineDescription.summary->socketsPerMachineNum; so ++ ) { for ( nu = 0; nu < machineDescription.summary->numaNodesPerSocketNum; nu ++ ) { for ( co = 0; co < machineDescription.summary->coresPerNumaNodeNum; co ++ ) { for ( pu = 0; pu < machineDescription.summary->pusPerCoreNum; pu ++ ) { tpl_PU_t * curPU = & tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ]; while ( osIdActivity [ curPU->osId ] -- ) { bar_TlsDataNew( expInfo, tlsDataSet, threadId, curPU); threadId ++; } } } } } } } else { /* automatic threads-to-PUs mapping */ switch ( machineDescription.summary->topologyType ) { case TPL_TYPE_HOMOGENEOUS_SYMMETRIC: { coresNum = machineDescription.summary->socketsPerMachineNum * machineDescription.summary->numaNodesPerSocketNum * machineDescription.summary->coresPerNumaNodeNum; pusPerCoreNum = machineDescription.summary->pusPerCoreNum; pusNum = coresNum * pusPerCoreNum; i = 0; coresPassed = 0; while ( i < threadsNum ) { for ( so = 0; so < machineDescription.summary->socketsPerMachineNum; so ++ ) { for ( nu = 0; nu < machineDescription.summary->numaNodesPerSocketNum; nu ++ ) { for ( co = 0; co < machineDescription.summary->coresPerNumaNodeNum; co ++ ) { int pusPerCoreMax = (((machineDescription.summary->smtType == TPL_SMT_HT) || (machineDescription.summary->smtType == TPL_SMT_MIC)) && (threadsNum <= pusNum)) ? 
((threadsNum / coresNum) + (((threadsNum - i) % (coresNum - coresPassed)) > 0)) : machineDescription.summary->pusPerCoreNum; coresPassed ++; for ( pu = 0; pu < pusPerCoreMax; pu ++ ) { int repPu = (threadsNum > pusNum) * (((threadsNum - pusNum) / pusNum) + (i < (threadsNum % pusNum))); bar_IncrementMachineActivity( so, nu, co, pu, repPu + 1); do { bar_TlsDataNew( expInfo, tlsDataSet, i, & tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ]); i++; if ( i == threadsNum ) { if ( repPu ) { bar_InternalError( __FILE__, __LINE__); } goto exitAutoMapping; } } while ( repPu-- ); } } } } } exitAutoMapping: ; break; } case TPL_TYPE_HOMOGENEOUS_ASYMMETRIC: case TPL_TYPE_HETEROGENEOUS: default: bar_InternalError( __FILE__, __LINE__); } } } #ifdef OMP_BARRIER static inline bar_SetOMPThreadAffinity( exp_Info_t * expInfo, int threadId) { int osId; # ifdef OMP_INTEL int ret; kmp_affinity_mask_t mask; osId = bar_ThreadIdToOsIdMap [ expInfo->curThreadsNum - 1 ] [ threadId ]; kmp_create_affinity_mask( &mask); kmp_set_affinity_mask_proc( osId, & mask); ret = kmp_set_affinity( & mask); if ( ret ) { bar_InternalError( __FILE__, __LINE__); } # endif /* OMP_INTEL */ # ifdef OMP_GOMP /* FIXME */ # endif } #else /* OMP_BARRIER */ static inline bar_SetPthreadAffinity( exp_Info_t * expInfo, int threadId, pthread_attr_t * pthreadAttr) { int ret; cpu_set_t currCpuSet; int osId; osId = bar_ThreadIdToOsIdMap [ expInfo->curThreadsNum - 1 ] [ threadId ]; CPU_ZERO( &currCpuSet); CPU_SET( osId, &currCpuSet); ret = pthread_attr_init( & pthreadAttr[ threadId ]); if ( ret ) { bar_InternalError( __FILE__, __LINE__); } pthread_attr_setaffinity_np( & pthreadAttr[ threadId ], sizeof( currCpuSet), &currCpuSet); } #endif /* !OMP_BARRIER */ static inline void bar_SetThreadAffinityHelperByThreadIdToPUMap( exp_Info_t * expInfo, #ifdef OMP_BARRIER int ompThreadNum #else pthread_attr_t * pthreadAttr #endif ) { int threadId = 0; while( threadId < expInfo->curThreadsNum ) { bar_ThreadIdToOsIdMap [ 
expInfo->curThreadsNum - 1 ] [ threadId ] = bar_ThreadIdToPUMap [ threadId ]->osId; #ifdef OMP_BARRIER bar_SetOMPThreadAffinity( expInfo, threadId); #else bar_SetPthreadAffinity( expInfo, threadId, pthreadAttr); #endif threadId++; } } static inline void bar_SetThreadAffinityHelperDefault( exp_Info_t * expInfo, #ifdef OMP_BARRIER int ompThreadNum #else pthread_attr_t * pthreadAttr #endif ) { int i, j; int threadId = 0; int threadsNum = expInfo->curThreadsNum; cpu_set_t onlineCpuSet; CPU_ZERO( &onlineCpuSet); sys_SetOnlineCpuSet( &onlineCpuSet); while( threadId < threadsNum ) { for ( j = 0; j < CPU_MAP_PRIORITY_DELTA; j++ ) { for ( i = j; (i < sizeof( cpu_set_t) * BITS_IN_BYTE) && threadId < threadsNum; i = i + CPU_MAP_PRIORITY_DELTA ) { if ( !CPU_ISSET( i, &onlineCpuSet) ) continue; #ifdef OMP_BARRIER { if ( threadId == ompThreadNum ) { bar_ThreadIdToOsIdMap [ expInfo->curThreadsNum - 1 ] [ threadId ] = i; bar_SetOMPThreadAffinity( expInfo, threadId); } } #else { bar_ThreadIdToOsIdMap [ expInfo->curThreadsNum - 1 ] [ threadId ] = i; bar_SetPthreadAffinity( expInfo, threadId, pthreadAttr); } #endif threadId++; } } } } static inline void bar_SetThreadAffinityHelper( exp_Info_t * expInfo, #ifdef OMP_BARRIER int ompThreadNum #else pthread_attr_t * pthreadAttr #endif ) { if ( machineDescription.machineId == UNDEFINED_MACHINE_ID ) { bar_SetThreadAffinityHelperDefault( expInfo, #ifdef OMP_BARRIER ompThreadNum #else pthreadAttr #endif ); } else { bar_SetThreadAffinityHelperByThreadIdToPUMap( expInfo, #ifdef OMP_BARRIER ompThreadNum #else pthreadAttr #endif ); } } #ifdef OMP_BARRIER static inline void bar_OmpSetThreadAffinity( exp_Info_t * expInfo, int ompThreadNum) { bar_SetThreadAffinityHelper( expInfo, ompThreadNum); } #else /* OMP_BARRIER */ static inline void bar_PthreadAttrsInit( exp_Info_t * expInfo, pthread_attr_t * pthreadAttr) { bar_SetThreadAffinityHelper( expInfo, pthreadAttr); } #endif /* !OMP_BARRIER */ static inline void bar_BarriersFini( void) { int i; for ( 
i = 0; i < BARRIERS_MAX_NUM; i++ ) { #if defined( PTHREAD_BARRIER) pthread_barrier_destroy( & bar_pthreadBarrier [ i ]); #endif } } static inline void bar_PthreadAttrsFini( exp_Info_t * expInfo, pthread_attr_t * pthreadAttr) { int i; int ret; int threadsNum = expInfo->curThreadsNum; for ( i = 0; i < threadsNum; i++ ) { ret = pthread_attr_destroy( & pthreadAttr[ i ]); bar_Assert( !ret); } } static inline void bar_PthreadsFini( exp_Info_t * expInfo, pthread_t * pthread) { int i; int ret; int threadsNum = expInfo->curThreadsNum; for ( i = 0; i < threadsNum; i++ ) { pthread_join( pthread [ i ], NULL); bar_Assert( !ret); } } static inline void bar_CreateThreadsAndRunTest( tls_DataSet_t * tlsDataSet, exp_Info_t * expInfo, #ifndef OMP_BARRIER pthread_t * pthread, pthread_attr_t * pthreadAttr, #endif void * (* testFunc)(tls_Data_t *) ) { int i; int ret; int threadsNum = expInfo->curThreadsNum; #ifdef OMP_BARRIER # pragma omp parallel num_threads( threadsNum) { i = omp_get_thread_num( ); bar_OmpSetThreadAffinity( expInfo, i); testFunc( (void * __restrict__) tlsDataSet->tlsData [ i ]); } #else for ( i = 0; i < threadsNum; i++ ) { ret = pthread_create( & pthread [ i ], & pthreadAttr [ i ], (void * (*)(void *)) testFunc, (void * __restrict__) tlsDataSet->tlsData [ i ]); bar_Assert( !ret); } # ifndef NDEBUG printf( "Created number of pthreads: %i \n", threadsNum); # endif #endif } static void bar_TestBarrier( exp_Info_t * expInfo) { # ifndef OMP_BARRIER pthread_t pthread [ THREADS_MAX_NUM ]; pthread_attr_t pthreadAttr [ THREADS_MAX_NUM ]; # endif tls_DataSet_t tlsDataSet; int threadsNum = expInfo->curThreadsNum; long int i, j, ret; bar_MemReuse( ); bar_TlsDataInit( expInfo, & tlsDataSet); # ifdef SANITY_BENCHMARK bar_InitTestArray( expInfo); # endif # ifdef NBODY_BENCHMARK bar_InitNbody( expInfo); # endif bar_BarriersInit( expInfo, & tlsDataSet); # ifndef OMP_BARRIER bar_PthreadAttrsInit( expInfo, pthreadAttr); # endif #ifdef TMPL_BENCHMARK bar_CreateThreadsAndRunTest( & 
tlsDataSet, expInfo, # ifndef OMP_BARRIER pthread, pthreadAttr, # endif & test_barrier_tmpl); #else /* TMPL_BENCHMARK */ # ifdef NBODY_BENCHMARK bar_CreateThreadsAndRunTest( & tlsDataSet, expInfo, # ifndef OMP_BARRIER pthread, pthreadAttr, # endif & test_barrier_nbody); # endif # ifdef SANITY_BENCHMARK bar_CreateThreadsAndRunTest( & tlsDataSet, expInfo, # ifndef OMP_BARRIER pthread, pthreadAttr, # endif & test_barrier_sanity); # endif # if defined( PURE_BENCHMARK) || defined( LDIMBL_BENCHMARK) bar_CreateThreadsAndRunTest( & tlsDataSet, expInfo, # ifndef OMP_BARRIER pthread, pthreadAttr, # endif & test_barrier_pure); # endif #endif /* !TMPL_BENCHMARK */ # ifndef OMP_BARRIER bar_PthreadsFini( expInfo, pthread); bar_PthreadAttrsFini( expInfo, pthreadAttr); # endif bar_BarriersFini( ); bar_TlsDataFini( ); } static void bar_CheckPreconditions( ) { bar_Assert( BITS_IN_BYTE * sizeof( atomic_Data_t) == HW_ATOMIC_DATA_SIZE_IN_BITS); #if !defined( ARCH_LL_SC) && !defined( ARCH_CAS) && !defined( ARCH_FETCH_AND_ADD) bar_Assert( 0); #endif } #ifndef NDEBUG static void bar_PrintExperimentInfo( exp_Info_t * expInfo) { printf( "Number of logical cpus: %i \n", CPUS_NUM); #ifdef PTHREAD_BARRIER printf( "Test pthread barrier...\n"); #endif #ifdef SR_BARRIER printf( "Test sense reversing barrier...\n"); #endif #ifdef TREE_BARRIER # ifdef T_GLOBAL_SENSE printf( "Test combining barrier with global sense...\n"); # endif # ifdef T_LOCAL_SENSE printf( "Test combining barrier with local sense...\n"); # endif #endif #ifdef DSMN_BARRIER printf( "Test dsmn barrier...\n"); #endif #ifdef DSMNH_BARRIER printf( "Test dsmnh barrier...\n"); #endif } #endif /* !NDEBUG */ static void bar_PrintTableHeader( ) { printf( "%s", TABLE_HEADER); printf( "\n"); } #ifdef DELAYED_PRINT static void bar_SaveTableLine( exp_Info_t * expInfo) { int barriersNum; int radix; double timePerBarrier; #ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME double timePerUnsynchronizedPhase; #endif #if defined( PURE_BENCHMARK) || defined( 
LDIMBL_BENCHMARK) barriersNum = expInfo->hiBarNum; #endif #ifdef NBODY_BENCHMARK barriersNum = expInfo->hiBarNum; #endif #ifdef SANITY_BENCHMARK barriersNum = ((expInfo->hiBarNum + expInfo->curThreadsNum * 2) / (expInfo->curThreadsNum * 2 + 1)) * (expInfo->curThreadsNum * 2 + 1); #endif timePerBarrier = (double) (expInfo->timer [ EXP_STAGE_EXP ].deltaTime - expInfo->timer [ EXP_STAGE_REF ].deltaTime) / (double) (barriersNum); #ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME timePerUnsynchronizedPhase = (double) (expInfo->timer [ EXP_STAGE_REF ].deltaTime) / (double) (barriersNum); #endif if ( expInfo->timer [ EXP_STAGE_EXP ].deltaTime < expInfo->timer [ EXP_STAGE_REF ].deltaTime ) { bar_Assert( expInfo->curThreadsNum == 1); timePerBarrier = 0.0; #ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME timePerUnsynchronizedPhase = 0.0; #endif } bar_Assert( expInfo->currTableLine < EXP_LINES_NUM); #if defined( TREE_BARRIER) radix = expInfo->curRadixNum; #else radix = UNDEFINED_RADIX; #endif expInfo->tableLines [ expInfo->currTableLine ].threadsNum = expInfo->curThreadsNum; expInfo->tableLines [ expInfo->currTableLine ].radix = radix; expInfo->tableLines [ expInfo->currTableLine ].timePerBarrier = timePerBarrier; #ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME expInfo->tableLines [ expInfo->currTableLine ].timePerUnsynchronizedPhase = timePerUnsynchronizedPhase; #endif expInfo->currTableLine++; } static void bar_PrintAffinity( int curThreadsNum) { int i; for ( i = 0; i < curThreadsNum; i++ ) { if ( i != 0 ) { printf(" "); } printf( "%d", bar_ThreadIdToOsIdMap [ curThreadsNum - 1 ] [ i ]); } printf( ","); } static void bar_PrintTableLines( exp_Info_t * expInfo) { int i = 0; for ( i = 0; i < expInfo->currTableLine; i++) { printf( "%s,", HOSTNAME_STR); printf( "%s,", ARCH_STR); printf( "%s,", EXP_ID_STR); printf( "%s,", BENCH_STR); printf( "%s,", BARRIER_STR); printf( "%d,", expInfo->tableLines [ i ].radix); printf( "%s,", SPINNING_STR); printf( "%d,", expInfo->tableLines [ i ].threadsNum); bar_PrintAffinity( 
expInfo->tableLines [ i ].threadsNum); #ifdef TMPL_BENCHMARK # ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME printf( "%s,", TMPL_TIME); printf( "%8.2f,", 0.0); # endif printf( "%s\n", TMPL_TIME); #else # ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME printf( "%8.2f,", expInfo->tableLines [ i ].timePerUnsynchronizedPhase + expInfo->tableLines [ i ].timePerBarrier); printf( "%8.2f,", expInfo->tableLines [ i ].timePerUnsynchronizedPhase); # endif printf( " %8.2f\n", expInfo->tableLines [ i ].timePerBarrier); #endif } } #endif /* DELAYED_PRINT */ #ifndef DELAYED_PRINT static void bar_PrintTableLine( exp_Info_t * expInfo) { int barriersNum; double barOverhead; #ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME double barUnsynchronziedPhaseTime; #endif #if defined( PURE_BENCHMARK) || defined( LDIMBL_BENCHMARK) barriersNum = expInfo->hiBarNum; #endif #ifdef SANITY_BENCHMARK barriersNum = ((expInfo->hiBarNum + expInfo->curThreadsNum * 2) / (expInfo->curThreadsNum * 2 + 1)) * (expInfo->curThreadsNum * 2 + 1); #endif barOverhead = (double) (expInfo->timer [ EXP_STAGE_EXP ].deltaTime - expInfo->timer [ EXP_STAGE_REF ].deltaTime) / (double) (barriersNum); #ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME barUnsynchronziedPhaseTime = (double) (expInfo->timer [ EXP_STAGE_REF ].deltaTime) / (double) (barriersNum); #endif printf( "%s,", HOSTNAME_STR); printf( "%s,", ARCH_STR); printf( "%s,", EXP_ID_STR); printf( "%s,", BENCH_STR); printf( "%s,", BARRIER_STR); #if defined( TREE_BARRIER) printf( "%d,", expInfo->curRadixNum); #else printf( "%d,", UNDEFINED_RADIX); #endif printf( "%s,", SPINNING_STR); printf( "%d,", expInfo->curThreadsNum); bar_PrintAffinity( expInfo->curThreadsNum); #ifdef TMPL_BENCHMARK # ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME printf( "%s,", TMPL_TIME); printf( "%8.2f,", 0.0); # endif printf( "%s\n", TMPL_TIME); #else # ifdef PRINT_SYNCH_UNSYNCH_PHASE_TIME printf( "%8.2f,", barOverhead + barUnsynchronziedPhaseTime); printf( "%8.2f,", barUnsynchronziedPhaseTime); # endif printf( " %8.2f\n", barOverhead); #endif 
}
#endif /* !DELAYED_PRINT */

/* Pin the parent (measuring) thread: either to the single highest-numbered
   online CPU, or to the whole online CPU set. Aborts via bar_InternalError
   on any pthread failure. */
static void bar_SetParentThreadAffinity( bar_ParentAffinity_t affinity)
{
    int i;
    int ret = 0;
    cpu_set_t onlineCpuSet;
    CPU_ZERO( &onlineCpuSet);
    sys_SetOnlineCpuSet( &onlineCpuSet);
    switch ( affinity )
    {
    case BAR_PARENT_AFFINITY_ONE:
    {
        /* Scan downward from the highest possible CPU index for an online CPU. */
        i = sizeof( cpu_set_t) * BITS_IN_BYTE - 1;
        for ( ; ;)
        {
            cpu_set_t currCpuSet;
            if ( !CPU_ISSET( i, &onlineCpuSet) )
            {
                if ( i == 0 )
                {
                    /* No online CPU found at all - should be impossible. */
                    bar_InternalError( __FILE__, __LINE__);
                }
                i--;
                continue;
            }
            CPU_ZERO( &currCpuSet);
            CPU_SET( i, &currCpuSet);
            ret = pthread_setaffinity_np( pthread_self( ), sizeof( cpu_set_t), &currCpuSet);
            break;
        }
        break;
    }
    case BAR_PARENT_AFFINITY_ALL:
        ret = pthread_setaffinity_np( pthread_self( ), sizeof( cpu_set_t), &onlineCpuSet);
        break;
    default:
        bar_Assert( 0);
    }
    if ( ret )
    {
        bar_InternalError( __FILE__, __LINE__);
    }
}

/* Record the set of CPUs this process may run on into bar_onlineCpuSet.
   NOTE: this is the scheduler-affinity mask, which may be narrower than the
   truly online set - see the FIXME below. */
static void bar_SetOnlineCpuSet( )
{
    CPU_ZERO( & bar_onlineCpuSet);
    /* FIXME: Get online cpu set properly using lsproc or its code */
    sched_getaffinity( 0, sizeof( cpu_set_t), & bar_onlineCpuSet);
}

/* Map a hostname to its index in bar_MachineIdToHostnameMap, or
   UNDEFINED_MACHINE_ID when the host is unknown. */
static unsigned bar_ResolveMachineIdByHostname( const char * hostname)
{
    int i;
    for ( i = 0; i < MACHINES_MAX_NUM; i++ )
    {
        if ( !strcmp( hostname, bar_MachineIdToHostnameMap [ i ]) )
        {
            return i;
        }
    }
    return UNDEFINED_MACHINE_ID;
}

/* Fill the per-PU topology descriptors (machine/socket/NUMA/core/PU ids) for
   the resolved machine; only homogeneous symmetric topologies are handled. */
static void tpl_InitMachineTopology( )
{
    unsigned machineId = machineDescription.machineId;
    int so, nu, co, pu;
    if ( machineId == UNDEFINED_MACHINE_ID )
        return;
    switch ( machineDescription.summary->topologyType )
    {
    case TPL_TYPE_HOMOGENEOUS_SYMMETRIC:
    {
        tpl_SMT_t smtType = machineDescription.summary->smtType;
        for ( so = 0; so < machineDescription.summary->socketsPerMachineNum; so++ )
        {
            for ( nu = 0; nu < machineDescription.summary->numaNodesPerSocketNum; nu++ )
            {
                /* OS-visible NUMA node id is linear across sockets. */
                machineDescription.sockets [ so ].numaNodes [ nu ].osId =
                    (so * machineDescription.summary->numaNodesPerSocketNum) + nu;
                for ( co = 0; co < machineDescription.summary->coresPerNumaNodeNum; co++ )
                {
                    for ( pu = 0; pu < machineDescription.summary->pusPerCoreNum; pu++ )
                    {
                        tpl_PUDescriptionsSet [
                            machineId ] [ so ] [ nu ] [ co ] [ pu ].smtType = smtType;
                        tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ].machineId = machineId;
                        tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ].socketId = so;
                        tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ].numaNodeId = nu;
                        tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ].coreId = co;
                        tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ].puId = pu;
                        tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ].numaOsId =
                            machineDescription.sockets [ so ].numaNodes [ nu ].osId;
                        /* Cross-link the descriptor into the topology tree... */
                        machineDescription.sockets [ so ].numaNodes [ nu ].cores [ co ]. pus [ pu ] =
                            & tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ];
                        /* ...and into the OS-id -> PU lookup table. */
                        bar_OsIdToPUMap [ tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ].osId ] =
                            & tpl_PUDescriptionsSet [ machineId ] [ so ] [ nu ] [ co ] [ pu ];
                    }
                }
            }
        }
        break;
    }
    case TPL_TYPE_HOMOGENEOUS_ASYMMETRIC:
    case TPL_TYPE_HETEROGENEOUS:
    default:
        /* Only homogeneous symmetric machines are described so far. */
        bar_InternalError( __FILE__, __LINE__);
    }
}

/* Initialize experiment bounds, stage clocks, parent-thread affinity,
   machine topology and benchmark memory for the whole run. */
static void bar_InitExperiment( exp_Info_t * expInfo)
{
    expInfo->loExpNum = 1;
    expInfo->hiExpNum = EXPERIMENTS_NUM;
    expInfo->loBarNum = 1;
    expInfo->hiBarNum = BARRIERS_NUM;
    expInfo->loThreadsNum = THREADS_MIN_NUM;
    expInfo->hiThreadsNum = THREADS_MAX_NUM;
    expInfo->timer [ EXP_STAGE_REF ].clockId = EXP_CLOCK_ID;
    expInfo->timer [ EXP_STAGE_EXP ].clockId = EXP_CLOCK_ID;
#ifdef DELAYED_PRINT
    expInfo->currTableLine = 0;
#endif
    bar_SetOnlineCpuSet( );
#ifndef OMP_BARRIER
    /* Park the parent thread on a single CPU (not done for OpenMP builds). */
    bar_SetParentThreadAffinity( BAR_PARENT_AFFINITY_ONE);
#endif
    machineDescription.machineId = bar_ResolveMachineIdByHostname( HOSTNAME_STR);
    if ( machineDescription.machineId != UNDEFINED_MACHINE_ID )
    {
        machineDescription.summary = & tpl_MachineSummariesSet [ machineDescription.machineId ];
    }
    else
    {
        /* Unknown host: no topology data, so disable topology-aware mapping. */
        TOPOLOGY_AWARE_MAPPING = FALSE;
    }
    tpl_InitMachineTopology( );
    bar_MemInit( );
#if defined( TREE_BARRIER)
    expInfo->loRadixNum = 2;
    expInfo->hiRadixNum = RADIX_MAX;
    /* Clamp the radix sweep to what this machine's topology supports. */
    if ( (machineDescription.machineId !=
        UNDEFINED_MACHINE_ID) &&
        (machineDescription.summary->maxRadix < expInfo->hiRadixNum) )
    {
        expInfo->hiRadixNum = machineDescription.summary->maxRadix;
    }
#endif
}

/* Report a barrier/spinning combination this build cannot run. */
static void bar_PrintUnsupportedConfiguration( )
{
    fprintf( stderr, "unsupported configuration!\n");
}

/* Return TRUE when the compiled-in spinning primitive cannot work in the
   current execution environment (privilege level / missing CPU feature). */
static inline bool bar_IsUnsupportedConfiguration( )
{
#if defined( ARCH_X86_FAMILY) && defined( HWYIELD_SPINNING)
    if ( sys_GetPrivilegeLevel( ) != X86_RING_0 )
    {
        fprintf( stderr, "HLT requires ring 0 access!\n");
    }
    /* NOTE(review): TRUE is returned even at ring 0 - it looks like this
       return may belong inside the if above; confirm intent. */
    return TRUE;
#endif
#if defined( ARCH_X86_FAMILY) && defined( WFE_SPINNING)
    {
        int in_eax = 1;
        int eax;
        int ebx;
        int ecx;
        int edx;
        /* CPUID leaf 1: ECX bit reports MONITOR/MWAIT availability. */
        x86_Cpuid( in_eax, & eax, & ebx, & ecx, & edx);
        if ( ecx & (1 << MONITOR_CPUID_ECX_BIT) )
        {
            if ( sys_GetPrivilegeLevel( ) != X86_RING_0 )
            {
                /* FIXME Too conservative deicision. Need to do more precise
                   check. X86 ISA 8.10.3 The instructions are conditionally
                   available at levels greater than 0. Use the following steps
                   to detect the availability of MONITOR and MWAIT: ... */
                fprintf( stderr, "MONITOR/MWAIT requires RING 0 access!\n");
                return TRUE;
            }
        }
        else
        {
            fprintf( stderr, "Processor does not MONITOR/MWAIT!\n");
            return TRUE;
        }
    }
#endif
    return FALSE;
}

/* Parse positional command-line options:
   argv[1] hostname, argv[2] experiment id, argv[3] interpolate-radix yes/no,
   argv[4] topology-aware mapping yes/no, argv[5] NUMA-aware allocation yes/no,
   argv[6] user-defined active-PU selection yes/no.
   Missing options fall back to compiled-in defaults; malformed values abort. */
static void bar_ReadArgs( int argc, const char * argv [ ])
{
    if ( argc > 1 )
    {
        HOSTNAME_STR = argv [ 1 ];
    }
    else
    {
        HOSTNAME_STR = "unknown-host";
    }
    if ( argc > 2 )
    {
        EXP_ID_STR = argv [ 2 ];
    }
    else
    {
        EXP_ID_STR = "0";
    }
    if ( argc > 3 )
    {
        if ( !strcmp( argv [ 3 ], "yes") )
        {
            INTERPOLATE_RADIX = TRUE;
        }
        else if ( !strcmp( argv [ 3 ], "no") )
        {
            INTERPOLATE_RADIX = FALSE;
        }
        else
        {
            bar_InternalError( __FILE__, __LINE__);
        }
    }
    else
    {
        INTERPOLATE_RADIX = DEF_INTERPOLATE_RADIX;
    }
    if ( argc > 4 )
    {
        if ( !strcmp( argv [ 4 ], "yes") )
        {
            TOPOLOGY_AWARE_MAPPING = TRUE;
        }
        else if ( !strcmp( argv [ 4 ], "no") )
        {
            TOPOLOGY_AWARE_MAPPING = FALSE;
        }
        else
        {
            bar_InternalError( __FILE__, __LINE__);
        }
    }
    else
    {
        TOPOLOGY_AWARE_MAPPING = DEF_TOPOLOGY_AWARE_MAPPING;
    }
    if ( argc > 5 )
    {
        if ( !strcmp( argv [ 5 ], "yes") )
        {
            TOPOLOGY_NUMA_AWARE_ALLOC
                = TRUE;
        }
        else if ( !strcmp( argv [ 5 ], "no") )
        {
            TOPOLOGY_NUMA_AWARE_ALLOC = FALSE;
        }
        else
        {
            bar_InternalError( __FILE__, __LINE__);
        }
    }
    else
    {
        TOPOLOGY_NUMA_AWARE_ALLOC = DEF_TOPOLOGY_NUMA_AWARE_ALLOC;
    }
    if ( argc > 6 )
    {
        if ( !strcmp( argv [ 6 ], "yes") )
        {
            USER_DEFINED_ACTIVE_PUS_SELECTION = TRUE;
        }
        else if ( !strcmp( argv [ 6 ], "no") )
        {
            USER_DEFINED_ACTIVE_PUS_SELECTION = FALSE;
        }
        else
        {
            bar_InternalError( __FILE__, __LINE__);
        }
    }
    else
    {
        USER_DEFINED_ACTIVE_PUS_SELECTION = DEF_USER_DEFINED_ACTIVE_PUS_SELECTION;
    }
}

/* Shorten runs that oversubscribe CPUs (busy-wait spinning degrades badly
   then) or that use inherently slow barrier flavors. */
static void bar_ThreadsCpuOverloadAdjustment( exp_Info_t * expInfo)
{
#if !defined( PTHREAD_BARRIER) && !defined( PTYIELD_SPINNING)
    if ( expInfo->curThreadsNum > CPUS_NUM)
    {
        expInfo->hiBarNum = BUSY_WAIT_THREAD_CPU_OVERLOAD_BARRIERS_NUM;
    }
#endif
#if defined( ARCH_ARM_FAMILY) && defined( PAUSE_SPINNING)
    expInfo->hiBarNum = BUSY_WAIT_THREAD_CPU_OVERLOAD_BARRIERS_NUM;
#endif
#if defined( PTHREAD_BARRIER)
    expInfo->hiBarNum = BARRIERS_NUM / 10;
#endif
}

#if defined( TREE_BARRIER)
/* Start the radix sweep at its lower bound. */
static void bar_InitRadix( exp_Info_t * expInfo)
{
    expInfo->curRadixNum = expInfo->loRadixNum;
}

/* Advance the radix: +1 while below RADIX_CONTINIOUS_BOUND, then by
   RADIX_INC. (RADIX_INC is a macro that supplies both the operator and the
   operand - it must, for "curRadixNum RADIX_INC;" to compile.) */
static void bar_IncRadix( exp_Info_t * expInfo)
{
#ifdef TRNM_BARRIER
    expInfo->curRadixNum += 1;
#else /* TRNM_BARRIER */
    if ( expInfo->curRadixNum < RADIX_CONTINIOUS_BOUND )
    {
        expInfo->curRadixNum += 1;
    }
    else
    {
        expInfo->curRadixNum RADIX_INC; /* macro supplies operator + operand */
    }
#endif /* !TRNM_BARRIER */
}

/* Keep sweeping while the radix is within the machine/benchmark limits;
   always allow radix 2 for the degenerate single-thread case. */
static bool bar_ProceedRadix( exp_Info_t * expInfo)
{
    int hiRadixNum = expInfo->hiRadixNum;
    if ( INTERPOLATE_RADIX && (expInfo->curThreadsNum < expInfo->hiRadixNum) )
    {
        /* A radix larger than the thread count adds nothing. */
        hiRadixNum = expInfo->curThreadsNum;
    }
    return (
# ifdef TRNM_BARRIER
# ifdef TRNM_STAT_WIN
        (expInfo->curRadixNum <= (MA_GRANULARITY + 1)) &&
# else
        (expInfo->curRadixNum <= MA_GRANULARITY) &&
# endif
# endif
        (expInfo->curRadixNum <= hiRadixNum)) ||
        ((expInfo->curThreadsNum == 1) && (expInfo->curRadixNum == 2));
}
#endif

/* Drive the full benchmark: for each thread count (and each radix, for tree
   barriers) run reference and experiment stages and emit one result line per
   experiment repetition. */
int main( int argc, const char * argv [ ])
{
    exp_Info_t expInfo;
    bar_ReadArgs( argc, argv);
    if (
        bar_IsUnsupportedConfiguration( ) )
    {
        return 0;
    }
    bar_CheckPreconditions( );
    bar_InitExperiment( & expInfo);
#ifndef NDEBUG
    bar_PrintExperimentInfo( & expInfo);
#endif
#ifndef DELAYED_PRINT
    bar_PrintTableHeader( );
#endif
    int lim;
    /* THREADS_INC is an operator-bearing macro (it must be, for
       "curThreadsNum THREADS_INC" to compile): probe the next step in "lim"
       first, and fall back to ++ once a full step would overshoot
       hiThreadsNum. */
    for ( expInfo.curThreadsNum = expInfo.loThreadsNum;
          expInfo.curThreadsNum <= expInfo.hiThreadsNum;
          lim = expInfo.curThreadsNum, lim THREADS_INC,
          (lim <= expInfo.hiThreadsNum) ? expInfo.curThreadsNum THREADS_INC
                                        : expInfo.curThreadsNum ++ )
    {
        bar_ThreadsCpuOverloadAdjustment( &expInfo);
#if defined( TREE_BARRIER)
        for ( bar_InitRadix( & expInfo); bar_ProceedRadix( & expInfo); bar_IncRadix( & expInfo) )
#endif
        {
            /* warming up */
            expInfo.expStage = EXP_STAGE_REF;
            bar_TestBarrier( & expInfo);
            expInfo.expStage = EXP_STAGE_EXP;
            bar_TestBarrier( & expInfo);
            for ( expInfo.curExpNum = expInfo.loExpNum; expInfo.curExpNum <= expInfo.hiExpNum ; expInfo.curExpNum++ )
            {
                /* REF provides the baseline that bar_PrintTableLine subtracts
                   from EXP to isolate the barrier overhead. */
                expInfo.expStage = EXP_STAGE_REF;
                bar_TestBarrier( & expInfo);
                expInfo.expStage = EXP_STAGE_EXP;
                bar_TestBarrier( & expInfo);
#ifdef DELAYED_PRINT
                bar_SaveTableLine( & expInfo);
#else
                bar_PrintTableLine( & expInfo);
#endif
            }
        }
    }
#ifdef DELAYED_PRINT
    bar_PrintTableHeader( );
    bar_PrintTableLines( & expInfo);
#endif
    bar_MemFini( );
    return 0;
}
/* ==== start of concatenated file: pooling_hcl_arm_int8.h ==== */
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "pooling_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <assert.h>
#include <math.h>
#include <string.h>
#include <arm_neon.h>

/* Kernel/stride combinations with a specialized fast path. */
#define POOL_GENERIC 0
#define POOL_K2S2 1
#define POOL_K3S2 2
#define POOL_K3S1 3

/* Scalar int8 max helper used in the vector-loop tails. */
static inline int8_t arm_max_int8(int8_t a, int8_t b)
{
    if (a > b)
        return a;
    else
        return b;
}

/* Scalar int8 min helper. */
static inline int8_t arm_min_int8(int8_t a, int8_t b)
{
    if (a > b)
        return b;
    else
        return a;
}

/* Common signature shared by all int8 pooling kernels below. */
typedef void (*pooling_kernel_int8_t)(const void* input, void* output, int inc, int inh, int inw,
                                      int outh, int outw, int k_h, int k_w, int s_h, int s_w,
                                      int pad_h0, int pad_w0, int pad_h1, int pad_w1, int is_caffe,
                                      float in_scale, float out_scale);

/* Copy an m x n int8 map into an m_align x n_align buffer offset by
   (pad_h, pad_w). NOTE(review): presumably dst is pre-zeroed by the caller
   so the uncopied border stays 0 - confirm at call sites. */
static void pad_0_align_2D_int8(int8_t* dst, int8_t* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        /* Already large enough: plain bulk copy, no padding needed. */
        memcpy(dst, src, m * n * sizeof(int8_t));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(int8_t));
    }
}

// pad 0 in right and down side on 3D
static void pad_0_align_3D_int8(int8_t* dst, int8_t* src, int m, int n, int m_align, int n_align, int c,
                                int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(int8_t));
        return;
    }
    /* Pad each channel plane independently. */
    for (i = 0; i < c; ++i)
    {
        pad_0_align_2D_int8(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w);
    }
}

/* Inverse of pad_0_align_2D_int8: crop the m_align x n_align padded map
   back to m x n starting at offset (pad_h, pad_w). */
static void delete_0_2D_int8(int8_t* dst, int8_t* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(int8_t));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(int8_t));
    }
}

// pad 0 in right and down side on 3D
static void delete_0_3D_int8(int8_t* dst, int8_t* src, int m_align, int n_align, int m, int n, int c, int pad_h,
                             int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(int8_t));
        return;
    }
    for (i = 0; i < c; ++i)
    {
        delete_0_2D_int8(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w);
    }
}

/* 2x2 stride-2 average pooling on int8 data stored channel-planar (one
   inh x inw plane per channel, per the "input + c * in_hw" indexing).
   Right/bottom padding (pad_w1/pad_h1) is handled by shrinking outw/outh
   and producing the edge column/row in dedicated epilogues. */
static void avg_2x2s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw,
                           int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                           int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if(pad_w1 > 0)
    {
        outw--;
    }
    if(pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 3; /* 8 outputs (16 input cols) per vector step */
    int remain_w = inw - outw * 2;
    int index = 0;
    for(int c = 0; c < inc; c++)
    {
        index = 0;
        const int8_t* line0 = input + c * in_hw;
        const int8_t* line1 = line0 + inw;
        int8_t* out_ptr = output + c * out_hw;
        for(int i = 0; i < outh; i++)
        {
            for(int j = 0; j < block_w; j++)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p10 = vld1_s8(line1);
                /* widening add of the two rows */
                int16x8_t sum0 = vaddl_s8(p00, p10);
                int8x8_t p01 =
vld1_s8(line0 + 8); int8x8_t p11 = vld1_s8(line1 + 8); int16x8_t sum1 = vaddl_s8(p01, p11); #ifdef __aarch64__ /* pairwaise max */ sum0 = vpaddq_s16(sum0, sum1); for(int n = 0; n < 8; n++) { out_ptr[n] = ( int8_t )round(sum0[n] / 4); } #else /* pairwaise max */ int32x4_t suml0 = vpaddlq_s16(sum0); int32x4_t suml1 = vpaddlq_s16(sum1); for(int n = 0; n < 4; n++) { out_ptr[n] = ( int8_t )round(suml0[n] / 4); out_ptr[n + 1] = ( int8_t )round(suml1[n] / 4); } #endif line0 += 16; out_ptr = out_ptr + 8; index = index + 8; } index = block_w * 8; if(outw - index >= 4) { int8x8_t p00 = vld1_s8(line0); int8x8_t p10 = vld1_s8(line1); int16x8_t sum0 = vaddl_s8(p00, p10); #ifdef __aarch64__ /* pairwaise max */ int16x8_t sum1 = {0}; sum0 = vpaddq_s16(sum0, sum1); for(int n = 0; n < 4; n++) { out_ptr[n] = ( int8_t )round(sum0[n] / 4); } #else /* pairwaise max */ int32x4_t suml0 = vpaddlq_s16(sum0); for(int n = 0; n < 4; n++) { out_ptr[n] = ( int8_t )round(suml0[n] / 4); } #endif line0 += 8; out_ptr = out_ptr + 4; index = index + 4; } for(; index < outw; index++) { *out_ptr = ( int8_t )round((line0[0] + line0[1] + line1[0] + line1[1]) / 4); out_ptr++; line0 += 2; line1 += 2; } if(pad_w1 > 0) { *out_ptr = ( int8_t )round((line0[0] + line1[0]) / 2); out_ptr++; } line0 += remain_w + inw; line1 += remain_w + inw; } if(pad_h1) { index = 0; for(int j = 0; j < block_w; j++) { int8x8_t p00 = vld1_s8(line0); int8x8_t p01 = vld1_s8(line0 + 8); int8x8_t p02 = {0}; /* pairwaise max */ int16x8_t sum0 = vaddl_s8(p00, p02); int16x8_t sum1 = vaddl_s8(p01, p02); #ifdef __aarch64__ sum0 = vpaddq_s16(sum0, sum1); for(int n = 0; n < 8; n++) { out_ptr[n] = ( int8_t )round(sum0[n] / 4); } #else int32x4_t suml0 = vpaddlq_s16(sum0); int32x4_t suml1 = vpaddlq_s16(sum1); for(int n = 0; n < 4; n++) { out_ptr[n] = ( int8_t )round(suml0[n] / 4); out_ptr[n + 1] = ( int8_t )round(suml1[n] / 4); } #endif line0 += 16; out_ptr = out_ptr + 8; index = index + 8; } index = block_w * 8; if(outw - index >= 4) { int8x8_t 
                    p00 = vld1_s8(line0);
                int8x8_t p01 = {0};
                int16x8_t sum0 = vaddl_s8(p00, p01);
#ifdef __aarch64__
                /* pairwise add */
                int16x8_t sum1 = {0};
                sum0 = vpaddq_s16(sum0, sum1);
                for(int n = 0; n < 4; n++)
                {
                    /* NOTE(review): divisor 4 here vs 2 in the scalar tail
                       below - confirm which is intended for the pad row. */
                    out_ptr[n] = ( int8_t )round(sum0[n] / 4);
                }
#else
                /* pairwise add */
                int32x4_t suml0 = vpaddlq_s16(sum0);
                for(int n = 0; n < 4; n++)
                {
                    out_ptr[n] = ( int8_t )round(suml0[n] / 4);
                }
#endif
                line0 += 8;
                out_ptr = out_ptr + 4;
                index = index + 4;
            }
            /* scalar tail of the bottom-pad row: 2 samples per output */
            for(; index < outw; index++)
            {
                int sum0 = line0[0] + line0[1];
                *out_ptr = ( int8_t )round((sum0) / 2);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if(pad_w1 > 0)
            {
                /* bottom-right corner: single remaining sample */
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}

/* 2x2 stride-2 max pooling for int8, channel-planar layout; right/bottom
   padding handled by single-column / single-row epilogues. */
static void max_2x2s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw,
                           int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                           int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if(pad_w1 > 0)
    {
        outw--;
    }
    if(pad_h1 > 0)
    {
        outh--;
    }
#ifdef __aarch64__
    int block_w = outw >> 4; /* 16 outputs per vector step */
#else
    int block_w = outw >> 3; /* 8 outputs per vector step */
#endif
    int remain_w = inw - outw * 2;
    int index = 0;
    for(int c = 0; c < inc; c++)
    {
        const int8_t* line0 = input + c * in_hw;
        const int8_t* line1 = line0 + inw;
        int8_t* out_ptr = output + c * out_hw;
        for(int i = 0; i < outh; i++)
        {
            for(int j = 0; j < block_w; j++)
            {
#ifdef __aarch64__
                int8x16_t p00 = vld1q_s8(line0);
                int8x16_t p10 = vld1q_s8(line1);
                int8x16_t max0 = vmaxq_s8(p00, p10);
                int8x16_t p01 = vld1q_s8(line0 + 16);
                int8x16_t p11 = vld1q_s8(line1 + 16);
                int8x16_t max1 = vmaxq_s8(p01, p11);
                /* pairwise max collapses each column pair into one output */
                int8x16_t _max = vpmaxq_s8(max0, max1);
                vst1q_s8(out_ptr, _max);
                line0 += 32;
                line1 += 32;
                out_ptr += 16;
            }
            index = block_w * 16;
#else
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p10 = vld1_s8(line1);
                int8x8_t max0 = vmax_s8(p00, p10);
                int8x8_t p01 = vld1_s8(line0 + 8);
                int8x8_t p11 = vld1_s8(line1 + 8);
                int8x8_t max1 = vmax_s8(p01, p11);
                /* pairwise max */
                int8x8_t _max = vpmax_s8(max0, max1);
                vst1_s8(out_ptr,
                    _max);
                line0 += 16;
                line1 += 16;
                out_ptr += 8;
            }
            index = block_w * 8;
#endif
            if(outw - index >= 8)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p10 = vld1_s8(line1);
                int8x8_t max0 = vmax_s8(p00, p10);
                int8x8_t p01 = vld1_s8(line0 + 8);
                int8x8_t p11 = vld1_s8(line1 + 8);
                int8x8_t max1 = vmax_s8(p01, p11);
                /* pairwise max */
                int8x8_t _max = vpmax_s8(max0, max1);
                vst1_s8(out_ptr, _max);
                line0 += 16;
                line1 += 16;
                out_ptr = out_ptr + 8;
                index = index + 8;
            }
            if(outw - index >= 4)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p10 = vld1_s8(line1);
                int8x8_t max0 = vmax_s8(p00, p10);
                /* pairwise max; only the low 4 results are stored */
                int8x8_t max1 = {0};
                int8x8_t _max = vpmax_s8(max0, max1);
                out_ptr[0] = _max[0];
                out_ptr[1] = _max[1];
                out_ptr[2] = _max[2];
                out_ptr[3] = _max[3];
                line0 += 8;
                line1 += 8;
                out_ptr = out_ptr + 4;
                index = index + 4;
            }
            /* scalar tail: one 2x2 window at a time */
            for(; index < outw; index++)
            {
                int8_t max0 = arm_max_int8(line0[0], line0[1]);
                int8_t max1 = arm_max_int8(line1[0], line1[1]);
                *out_ptr = arm_max_int8(max0, max1);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if(pad_w1 > 0)
            {
                /* right-pad column: max over the single remaining column */
                *out_ptr = arm_max_int8(line0[0], line1[0]);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
        }
        if(pad_h1 > 0)
        {
            /* bottom-pad row: only line0 holds valid data here */
            for(int j = 0; j < block_w; j++)
            {
#ifdef __aarch64__
                int8x16_t p00 = vld1q_s8(line0);
                int8x16_t p01 = vld1q_s8(line0 + 16);
                /* pairwise max */
                int8x16_t _max = vpmaxq_s8(p00, p01);
                vst1q_s8(out_ptr, _max);
                line0 += 32;
                out_ptr += 16;
            }
            index = block_w * 16;
#else
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p01 = vld1_s8(line0 + 8);
                /* pairwise max */
                int8x8_t _max = vpmax_s8(p00, p01);
                vst1_s8(out_ptr, _max);
                line0 += 16;
                out_ptr += 8;
            }
            index = block_w * 8;
#endif
            if(outw - index >= 8)
            {
                int8x8_t p00 = vld1_s8(line0);
                int8x8_t p01 = vld1_s8(line0 + 8);
                /* pairwise max */
                int8x8_t _max = vpmax_s8(p00, p01);
                vst1_s8(out_ptr, _max);
                line0 += 16;
                out_ptr = out_ptr + 8;
                index = index + 8;
            }
            if(outw - index >= 4)
            {
                int8x8_t p00 = vld1_s8(line0);
                /* pairwise max */
                int8x8_t p01 = {0};
                int8x8_t _max = vpmax_s8(p00, p01);
                out_ptr[0] =
                    _max[0];
                out_ptr[1] = _max[1];
                out_ptr[2] = _max[2];
                out_ptr[3] = _max[3];
                line0 += 8;
                out_ptr = out_ptr + 4;
                index = index + 4;
            }
            for(; index < outw; index++)
            {
                *out_ptr = arm_max_int8(line0[0], line0[1]);
                out_ptr++;
                line0 += 2;
            }
            if(pad_w1 > 0)
            {
                /* NOTE(review): in this bottom-pad path line1 points one row
                   past the last valid row - line0[0] alone looks intended;
                   confirm. */
                *out_ptr = arm_max_int8(line0[0], line1[0]);
                out_ptr++;
            }
        }
    }
}

/* 3x3 stride-2 average pooling for int8, channel-planar layout, using
   de-interleaving loads (vld2) so val[0]/val[1] hold even/odd columns.
   Right/bottom pad edges are averaged over fewer samples (see the divisors
   in the epilogues). */
static void avg_3x3s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw,
                           int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                           int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if(pad_w1 > 0)
    {
        outw--;
    }
    if(pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 3;
    int remain_w = inw - outw * 2;
    int index = 0;
    for(int c = 0; c < inc; c++)
    {
        const int8_t* line0 = input + c * in_hw;
        const int8_t* line1 = line0 + inw;
        const int8_t* line2 = line1 + inw;
        int8_t* out_ptr = output + c * out_hw;
        for(int i = 0; i < outh; i++)
        {
            index = 0;
            for(int j = 0; j < block_w; j++)
            {
                /* De-interleave 16 bytes per row: val[0]=even cols, val[1]=odd. */
                int8x8x2_t p00 = vld2_s8(line0);
                int8x8x2_t p10 = vld2_s8(line1);
                int8x8x2_t p20 = vld2_s8(line2);
                int8x8x2_t p00_new = vld2_s8(line0 + 16);
                int16x8_t sum0 = vaddl_s8(p00.val[0], p00.val[1]);
                /* third column of each window = even columns shifted by one */
                int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1);
                sum0 = vaddw_s8(sum0, p01);
                int8x8x2_t p10_new = vld2_s8(line1 + 16);
                sum0 = vaddw_s8(sum0, p10.val[0]);
                sum0 = vaddw_s8(sum0, p10.val[1]);
                int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1);
                sum0 = vaddw_s8(sum0, p11);
                int8x8x2_t p20_new = vld2_s8(line2 + 16);
                sum0 = vaddw_s8(sum0, p20.val[0]);
                sum0 = vaddw_s8(sum0, p20.val[1]);
                int8x8_t p21 = vext_s8(p20.val[0], p20_new.val[0], 1);
                sum0 = vaddw_s8(sum0, p21);
                // sum0 = vadd_s8(vadd_s8(sum0, sum1), sum2);
                for(int n = 0; n < 8; n++)
                {
                    out_ptr[n] = ( int8_t )round(sum0[n] / 9);
                }
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 16;
                line1 += 16;
                line2 += 16;
                out_ptr += 8;
                index = index + 8;
            }
            /* scalar tail: full 3x3 windows */
            for(; index < outw; index++)
            {
                int sum = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]
+ line2[0] + line2[1] + line2[2]); *out_ptr = ( int8_t )round(sum / 9); out_ptr++; line0 += 2; line1 += 2; line2 += 2; } if(pad_w1 == 1) { int sum = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]); *out_ptr = ( int8_t )round(sum / 6); out_ptr++; } else if(pad_w1 == 2) { int sum = (line0[0] + line1[0] + line2[0]); *out_ptr = ( int8_t )round(sum / 6); out_ptr++; } line0 += remain_w + inw; line1 += remain_w + inw; line2 += remain_w + inw; } if(pad_h1 == 1) { index = 0; for(int j = 0; j < block_w; j++) { int8x8x2_t p00 = vld2_s8(line0); int8x8x2_t p10 = vld2_s8(line1); int8x8x2_t p00_new = vld2_s8(line0 + 16); int16x8_t sum0 = vaddl_s8(p00.val[0], p00.val[1]); int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1); sum0 = vaddw_s8(sum0, p01); int8x8x2_t p10_new = vld2_s8(line1 + 16); sum0 = vaddw_s8(sum0, p10.val[0]); sum0 = vaddw_s8(sum0, p10.val[1]); int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1); sum0 = vaddw_s8(sum0, p11); for(int n = 0; n < 8; n++) { out_ptr[n] = ( int8_t )round(sum0[n] / 6); } p00 = p00_new; p10 = p10_new; line0 += 16; line1 += 16; out_ptr += 8; index = index + 8; } for(; index < outw; index++) { int sum = (line0[0] + line0[1] + line0[2] + line1[0] + line1[1] + line1[2]); *out_ptr = ( int8_t )round(sum / 6); out_ptr++; line0 += 2; line1 += 2; } if(pad_w1 == 1) { int sum = (line0[0] + line0[1] + line1[0] + line1[1]); *out_ptr = ( int8_t )round(sum / 4); out_ptr++; } else if(pad_w1 == 2) { int sum = (line0[0] + line1[0]); *out_ptr = ( int8_t )round(sum / 2); out_ptr++; } } else if(pad_h1 == 2) { index = 0; for(int j = 0; j < block_w; j++) { int8x8x2_t p00 = vld2_s8(line0); int8x8x2_t p00_new = vld2_s8(line0 + 16); int16x8_t sum0 = vaddl_s8(p00.val[0], p00.val[1]); int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1); sum0 = vaddw_s8(sum0, p01); for(int n = 0; n < 8; n++) { out_ptr[n] = ( int8_t )round(sum0[n] / 3); } p00 = p00_new; line0 += 16; out_ptr += 8; index = index + 8; } for(; index < outw; index++) { *out_ptr = ( 
                    int8_t )round((line0[0] + line0[1] + line0[2]) / 3);
                out_ptr++;
                line0 += 2;
            }
            if(pad_w1 == 1)
            {
                /* corner: 2 samples */
                *out_ptr = ( int8_t )round((line0[0] + line0[1]) / 2);
                out_ptr++;
            }
            else if(pad_w1 == 2)
            {
                /* corner: single sample */
                *out_ptr = line0[0];
                out_ptr++;
            }
        }
    }
}

/* 3x3 stride-2 max pooling for int8 using de-interleaving loads (vld2q). */
static void max_3x3s2_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw,
                           int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                           int is_caffe, float in_scale, float out_scale)
{
    int in_hw = inw * inh;
    int out_hw = outh * outw;
    if(pad_w1 > 0)
    {
        outw--;
    }
    if(pad_h1 > 0)
    {
        outh--;
    }
    int block_w = outw >> 4; /* 16 outputs (32 input cols) per vector step */
    int remain_w = inw - outw * 2;
    int index = 0;
    for(int c = 0; c < inc; c++)
    {
        const int8_t* line0 = input + c * in_hw;
        const int8_t* line1 = line0 + inw;
        const int8_t* line2 = line1 + inw;
        int8_t* out_ptr = output + c * out_hw;
        for(int i = 0; i < outh; i++)
        {
            int8x16x2_t p00 = vld2q_s8(line0);
            int8x16x2_t p10 = vld2q_s8(line1);
            int8x16x2_t p20 = vld2q_s8(line2);
            for(int j = 0; j < block_w; j++)
            {
                /* p00 = [1,2,3,4,5,6,7,8...]
                   p00.val[0]=[1,3,5,7...] max0 = [2,4,6,8...]
                   p00_new = [9,10,11,12,13,14,15,16...]
                   p01 = [3,5,7,9...]
                   max0=max(max0,p01)=[3,5,7,9] */
                int8x16x2_t p00_new = vld2q_s8(line0 + 32);
                int8x16_t max0 = vmaxq_s8(p00.val[0], p00.val[1]);
                int8x16_t p01 = vextq_s8(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_s8(max0, p01);
                int8x16x2_t p10_new = vld2q_s8(line1 + 32);
                int8x16_t max1 = vmaxq_s8(p10.val[0], p10.val[1]);
                int8x16_t p11 = vextq_s8(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_s8(max1, p11);
                int8x16x2_t p20_new = vld2q_s8(line2 + 32);
                int8x16_t max2 = vmaxq_s8(p20.val[0], p20.val[1]);
                int8x16_t p21 = vextq_s8(p20.val[0], p20_new.val[0], 1);
                max2 = vmaxq_s8(max2, p21);
                /* combine the three per-row maxima into 16 outputs */
                max0 = vmaxq_s8(vmaxq_s8(max0, max1), max2);
                vst1q_s8(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 32;
                line1 += 32;
                line2 += 32;
                out_ptr += 16;
            }
            index = block_w * 16;
            if(outw - index > 8)
            {
                /* 8-wide tail using the same de-interleave/shift/max pattern */
                int8x8x2_t p00 = vld2_s8(line0);
                int8x8x2_t p10 = vld2_s8(line1);
                int8x8x2_t p20 = vld2_s8(line2);
                int8x8x2_t p00_new = vld2_s8(line0 + 16);
                int8x8_t max0 = vmax_s8(p00.val[0], p00.val[1]);
                int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1);
                max0 = vmax_s8(max0, p01);
                int8x8x2_t p10_new = vld2_s8(line1 + 16);
                int8x8_t max1 = vmax_s8(p10.val[0], p10.val[1]);
                int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1);
                max1 = vmax_s8(max1, p11);
                int8x8x2_t p20_new = vld2_s8(line2 + 16);
                int8x8_t max2 = vmax_s8(p20.val[0], p20.val[1]);
                int8x8_t p21 = vext_s8(p20.val[0], p20_new.val[0], 1);
                max2 = vmax_s8(max2, p21);
                max0 = vmax_s8(vmax_s8(max0, max1), max2);
                vst1_s8(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                p20 = p20_new;
                line0 += 16;
                line1 += 16;
                line2 += 16;
                out_ptr += 8;
                index = index + 8;
            }
            /* scalar tail: full 3x3 windows */
            for(; index < outw; index++)
            {
                int8_t max0 = arm_max_int8(arm_max_int8(line0[0], line0[1]), line0[2]);
                int8_t max1 = arm_max_int8(arm_max_int8(line1[0], line1[1]), line1[2]);
                int8_t max2 = arm_max_int8(arm_max_int8(line2[0], line2[1]), line2[2]);
                *out_ptr = arm_max_int8(arm_max_int8(max0, max1), max2);
                out_ptr++;
                line0 += 2;
                line1 += 2;
                line2 += 2;
            }
            if(pad_w1 == 1)
            {
                /* right-pad column: max over the last 2 valid columns */
                int8_t max0 =
                    arm_max_int8(arm_max_int8(line0[0], line0[1]), arm_max_int8(line1[0], line1[1]));
                *out_ptr = arm_max_int8(arm_max_int8(line2[0], line2[1]), max0);
                out_ptr++;
            }
            line0 += remain_w + inw;
            line1 += remain_w + inw;
            line2 += remain_w + inw;
        }
        if(pad_h1 == 1)
        {
            /* bottom-pad row: only two valid input rows remain */
            int8x16x2_t p00 = vld2q_s8(line0);
            int8x16x2_t p10 = vld2q_s8(line1);
            for(int j = 0; j < block_w; j++)
            {
                int8x16x2_t p00_new = vld2q_s8(line0 + 32);
                int8x16_t max0 = vmaxq_s8(p00.val[0], p00.val[1]);
                int8x16_t p01 = vextq_s8(p00.val[0], p00_new.val[0], 1);
                max0 = vmaxq_s8(max0, p01);
                int8x16x2_t p10_new = vld2q_s8(line1 + 32);
                int8x16_t max1 = vmaxq_s8(p10.val[0], p10.val[1]);
                int8x16_t p11 = vextq_s8(p10.val[0], p10_new.val[0], 1);
                max1 = vmaxq_s8(max1, p11);
                max0 = vmaxq_s8(max0, max1);
                vst1q_s8(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 32;
                line1 += 32;
                out_ptr += 16;
            }
            index = block_w * 16;
            if(outw - index > 8)
            {
                int8x8x2_t p00 = vld2_s8(line0);
                int8x8x2_t p10 = vld2_s8(line1);
                int8x8x2_t p00_new = vld2_s8(line0 + 16);
                int8x8_t max0 = vmax_s8(p00.val[0], p00.val[1]);
                int8x8_t p01 = vext_s8(p00.val[0], p00_new.val[0], 1);
                max0 = vmax_s8(max0, p01);
                int8x8x2_t p10_new = vld2_s8(line1 + 16);
                int8x8_t max1 = vmax_s8(p10.val[0], p10.val[1]);
                int8x8_t p11 = vext_s8(p10.val[0], p10_new.val[0], 1);
                max1 = vmax_s8(max1, p11);
                max0 = vmax_s8(max0, max1);
                vst1_s8(out_ptr, max0);
                p00 = p00_new;
                p10 = p10_new;
                line0 += 16;
                line1 += 16;
                out_ptr += 8;
                index = index + 8;
            }
            for(; index < outw; index++)
            {
                int8_t max0 = arm_max_int8(arm_max_int8(line0[0], line0[1]), line0[2]);
                int8_t max1 = arm_max_int8(arm_max_int8(line1[0], line1[1]), line1[2]);
                *out_ptr = arm_max_int8(max0, max1);
                out_ptr++;
                line0 += 2;
                line1 += 2;
            }
            if(pad_w1 == 1)
            {
                *out_ptr = arm_max_int8(arm_max_int8(line0[0], line0[1]), arm_max_int8(line1[0], line1[1]));
                out_ptr++;
            }
        }
    }
}

/* Global average pooling: mean over each whole H*W plane, requantized from
   in_scale to out_scale and saturated to [-127, 127]. */
static void avg_global_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw,
                            int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0,
int pad_h1, int pad_w1, int is_caffe, float in_scale,float out_scale)
/* Global average pooling over an int8 tensor: one output value per channel.
   Accumulates each channel in int32 (NEON widening adds for 16-element chunks,
   scalar tail for the remainder), dequantizes with in_scale, averages,
   requantizes with out_scale, rounds and saturates to [-127, 127].
   k_*, s_*, pad_* and is_caffe are unused (global pool) -- they exist only to
   match the common pooling_kernel_int8_t signature. */
{
    int in_hw = inw * inh;
    int block = in_hw >> 4; /* number of whole 16-element NEON chunks */
    for(int c = 0; c < inc; c++)
    {
        int index = 0;
        const int8_t* line0 = input + c * in_hw;
        int8_t* out_ptr = output + c; /* NOTE(review): output is laid out channel-last here */
        int sum = 0;
        for(int j = 0; j < block; j++)
        {
            int8x8_t p00 = vld1_s8(line0);
            int8x8_t p01 = vld1_s8(line0 + 8);
            int16x8_t pls = vaddl_s8(p00, p01); /* widen to 16-bit while adding */
            int32x4_t tmp = vpaddlq_s16(pls);   /* pairwise widen to 32-bit */
            sum += vgetq_lane_s32(tmp, 0) + vgetq_lane_s32(tmp, 1) + vgetq_lane_s32(tmp, 2) +
                   vgetq_lane_s32(tmp, 3);
            line0 += 16;
        }
        index = block * 16;
        /* scalar tail over the remaining elements */
        for(int j = index; j < in_hw; j++)
        {
            sum += line0[0];
            line0++;
        }
        /* dequantize, average, requantize */
        float sum_fp32 = sum * in_scale;
        sum_fp32 = sum_fp32/in_hw;
        int tmp = (int)round(sum_fp32/out_scale);
        /* symmetric int8 clamp; -128 is deliberately excluded */
        if(tmp > 127)
            tmp = 127;
        else if(tmp < -127)
            tmp = -127;
        *out_ptr = ( int8_t )tmp;//round(sum / in_hw);
    }
}
/* Global max pooling over an int8 tensor: one output value per channel.
   Unused kernel/stride/pad parameters exist to match pooling_kernel_int8_t;
   scales are unused because max pooling is quantization-invariant only when
   in_scale == out_scale -- presumably guaranteed by the caller (TODO confirm). */
static void max_global_int8(const int8_t* input, int8_t* output, int inc, int inh, int inw, int outh, int outw,
                            int k_h, int k_w, int s_h, int s_w, int pad_h0, int pad_w0, int pad_h1, int pad_w1,
                            int is_caffe, float in_scale,float out_scale)
{
    int in_hw = inw * inh;
    int block = in_hw >> 5; /* number of whole 32-element NEON chunks */
    for(int c = 0; c < inc; c++)
    {
        int index = 0;
        const int8_t* line0 = input + c * in_hw;
        int8_t* out_ptr = output + c;
        /* NOTE(review): this load reads 16 bytes unconditionally; when in_hw < 16 the
           read runs past the channel even though the value is only used when
           block > 0 -- confirm callers guarantee in_hw >= 16 or accept the over-read. */
        int8x16_t p00 = vld1q_s8(line0);
        int8x16_t res = p00;
        for(int j = 0; j < block; j++)
        {
            int8x16_t p00 = vld1q_s8(line0);
            int8x16_t p01 = vld1q_s8(line0 + 16);
            int8x16_t max0 = vmaxq_s8(p00, p01);
            res = vmaxq_s8(res, max0);
            line0 += 32;
        }
        int8_t max_ = 0;
        if(block > 0)
        {
            /* horizontal reduction of the 16-lane running maximum */
            max_ = res[0];
#ifdef __aarch64__
            for(int n = 1; n < 16; n++)
            {
                max_ = arm_max_int8(max_, res[n]);
            }
#else
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 0));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 1));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 2));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 3));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 4));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 5));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 6));
            max_ = arm_max_int8(max_,
vgetq_lane_s8(res, 7));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 8));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 9));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 10));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 11));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 12));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 13));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 14));
            max_ = arm_max_int8(max_, vgetq_lane_s8(res, 15));
#endif
        }
        else
        {
            /* fewer than 32 elements: seed the maximum from the first element */
            max_ = line0[0];
        }
        index = block * 32;
        /* scalar tail over the remaining elements */
        for(int j = index; j < in_hw; j++)
        {
            max_ = arm_max_int8(max_, line0[0]);
            line0++;
        }
        *out_ptr = max_;
    }
}

/* Select the specialized int8 pooling kernel matching the parameters.
   Returns 0 on success, -1 when no specialized kernel is available. */
int pooling_kernel_int8_perf_prerun(struct tensor* input, struct tensor* out, struct pool_param* param)
{
    int pool_size = POOL_GENERIC;

    /* global pooling */
    if (param->global)
    {
        if (param->pool_method == POOL_AVG)
            param->funct = ( pooling_kernel_int8_t )avg_global_int8;
        else if (param->pool_method == POOL_MAX)
            param->funct = ( pooling_kernel_int8_t )max_global_int8;

        assert(param->funct != NULL);
        return 0;
    }

    /* general pooling: classify the kernel/stride combination */
    if (param->stride_h == 2 && param->stride_w == 2)
    {
        if (param->kernel_h == 2 && param->kernel_w == 2)
            pool_size = POOL_K2S2;
        else if (param->kernel_h == 3 && param->kernel_w == 3)
            pool_size = POOL_K3S2;
    }

    /* general max pooling, k2s2, k2s2p1, k3s2, k3s2p1 (symmetric padding only) */
    if (param->pool_method == POOL_MAX)
    {
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
        {
            if (pool_size == POOL_K2S2)
                param->funct = ( pooling_kernel_int8_t )max_2x2s2_int8;
            else if (pool_size == POOL_K3S2)
                param->funct = ( pooling_kernel_int8_t )max_3x3s2_int8;
        }
    }
    /* general avg pooling, k2s2, k2s2p1, k3s2, k3s2p1 (symmetric padding only) */
    else if (param->pool_method == POOL_AVG)
    {
        if ((param->pad_h0 == param->pad_w0) && (param->pad_h1 == param->pad_w1))
        {
            if (pool_size == POOL_K2S2)
                param->funct = ( pooling_kernel_int8_t )avg_2x2s2_int8;
            else if (pool_size == POOL_K3S2)
                param->funct = ( pooling_kernel_int8_t )avg_3x3s2_int8;
        }
    }

    if (param->funct == NULL)
    {
        TLOG_ERR("perf pooling func not be find\n");
        return -1;
    }

    return 0;
}

/* Run the kernel selected by prerun over every batch and channel.
   When param->input_pad is set, padding is materialized into that scratch
   buffer first and the kernel runs pad-free.  Channels run in parallel. */
int pooling_kernel_int8_perf_run(struct tensor* input, struct tensor* output, struct pool_param* param, int num_thread)
{
    int is_caffe = param->caffe_flavor;
    pooling_kernel_int8_t kernel = (pooling_kernel_int8_t)(param->funct);

    int batch = input->dims[0];
    int c = input->dims[1];
    int in_h = input->dims[2];
    int in_w = input->dims[3];

    int out_h = output->dims[2];
    int out_w = output->dims[3];

    int pad_h0 = param->pad_h0;
    int pad_h1 = param->pad_h1;
    int pad_w0 = param->pad_w0;
    int pad_w1 = param->pad_w1;

    int in_h_origin = in_h;
    int in_w_origin = in_w;
    int in_h_pad = in_h + pad_h0;
    int in_w_pad = in_w + pad_w0;

    int img_size = c * in_h * in_w;
    int feature_size = c * out_h * out_w;

    float input_scale = input->scale;
    float output_scale = output->scale;

    if (param->input_pad != NULL)
    {
        /* padding already materialized -> the kernel sees zero leading pad;
           NOTE(review): the +1 assumes pad_h0 == pad_w0 == 1 -- confirm prerun
           only selects this path for single-pixel padding */
        param->pad_h0 = 0;
        param->pad_w0 = 0;
        in_h += 1;
        in_w += 1;
    }

    for (int n = 0; n < batch; n++)
    {
        void* input_frame = input->data + n * img_size * input->elem_size;
        void* output_frame = output->data + n * feature_size * output->elem_size;

        if (param->input_pad != NULL)
        {
            pad_0_align_3D_int8((int8_t*)param->input_pad + n * c * in_h_pad * in_w_pad, (int8_t*)input_frame,
                                in_h_origin, in_w_origin, in_h_pad, in_w_pad, c, pad_h0, pad_w0);
        }

#pragma omp parallel for num_threads(num_thread)
        for (int ch = 0; ch < c; ch++)
        {
            void* cur_input = NULL;
            if (param->input_pad != NULL)
            {
                cur_input = param->input_pad + ch * in_h_pad * in_w_pad * input->elem_size;
            }
            else
            {
                cur_input = input_frame + ch * in_h * in_w * input->elem_size;
            }
            void* cur_output = output_frame + ch * out_h * out_w * output->elem_size;
            kernel(cur_input, cur_output, 1, in_h, in_w, out_h, out_w, param->kernel_h, param->kernel_w,
                   param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->pad_h1, param->pad_w1,
                   is_caffe, input_scale, output_scale);
        }
    }

    return 0;
}
productoParalelo.c
//Aguilar Luna Gabriel Daniel
//El archivo lleva a cabo el producto punto de dos vectores, de longitud variable, con procesamiento en paralelo
//(Computes the dot product of two randomly sized, randomly filled vectors,
// accumulating the sum in parallel with an OpenMP reduction.)
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<omp.h>

void prodpunto(int*,int*,int);
void funcionArr(int*,int);

/* Entry point: builds two random vectors of identical length (1..11)
   and prints their dot product. */
int main()
{
    srand(time(NULL));
    int n=rand()%11+1;
    printf("Tamaño de los vectores: %d\n",n);
    int vector1[n],vector2[n];
    funcionArr(vector1,n);
    funcionArr(vector2,n);
    prodpunto(vector1,vector2,n);
    return 0; /* explicit success status */
}

/* Fills vector with n random values in [0,10] and echoes them on one line. */
void funcionArr(int* vector,int n)
{
    for(int i=0;i<n;i++)
    {
        vector[i]=rand()%11;
        printf("%4d", vector[i]);
    }
    printf("\n");
}

/* Prints "a0*b0 + a1*b1 + ... = res" where res = vect1 . vect2.
   The accumulation runs in parallel via an OpenMP reduction; the terms are
   printed afterwards by a sequential loop so the output order is stable
   (the original printed inside the parallel loop, which interleaved the
   terms nondeterministically).  Removed unused locals tid, nth and resp[]. */
void prodpunto(int* vect1, int* vect2, int n){
    int res=0;
#pragma omp parallel for reduction(+:res)
    for (int i = 0; i < n; ++i){
        res+=vect1[i]*vect2[i];
    }
    /* sequential, ordered echo of each product term */
    for (int i = 0; i < n; ++i){
        printf("%3d", vect1[i]*vect2[i]);
        if (i+1<n){
            printf(" +");
        }
    }
    printf(" =%d\n", res);
}
libperf_int.h
/**
 * Copyright (C) Mellanox Technologies Ltd. 2001-2015.  ALL RIGHTS RESERVED.
 * Copyright (C) The University of Tennessee and The University
 * of Tennessee Research Foundation. 2016. ALL RIGHTS RESERVED.
 *
 * See file LICENSE for terms.
 */

#ifndef LIBPERF_INT_H_
#define LIBPERF_INT_H_

#include <tools/perf/api/libperf.h>

BEGIN_C_DECLS

/** @file libperf_int.h */

#include <ucs/async/async.h>
#include <ucs/time/time.h>
#include <ucs/sys/math.h>
#if _OPENMP
#include <omp.h>
#endif

#define TIMING_QUEUE_SIZE    2048
#define UCT_PERF_TEST_AM_ID  5
#define ADDR_BUF_SIZE        2048

/* Loop until the test has reached its iteration or time limit. */
#define UCX_PERF_TEST_FOREACH(perf) \
    while (!ucx_perf_context_done(perf))

/* Invoke an RTE callback on this test's RTE group. */
#define rte_call(_perf, _func, ...) \
    ((_perf)->params.rte->_func((_perf)->params.rte_group, ## __VA_ARGS__))

typedef struct ucx_perf_context       ucx_perf_context_t;
typedef struct uct_peer               uct_peer_t;
typedef struct ucp_perf_request       ucp_perf_request_t;
typedef struct ucx_perf_thread_context ucx_perf_thread_context_t;

/* Memory-type specific allocation hooks used by the benchmark. */
struct ucx_perf_allocator {
    ucs_memory_type_t mem_type;
    ucs_status_t (*init)(ucx_perf_context_t *perf);
    ucs_status_t (*ucp_alloc)(const ucx_perf_context_t *perf, size_t length,
                              void **address_p, ucp_mem_h *memh, int non_blk_flag);
    void         (*ucp_free)(const ucx_perf_context_t *perf, void *address,
                             ucp_mem_h memh);
    ucs_status_t (*uct_alloc)(const ucx_perf_context_t *perf, size_t length,
                              unsigned flags, uct_allocated_memory_t *alloc_mem);
    void         (*uct_free)(const ucx_perf_context_t *perf,
                             uct_allocated_memory_t *alloc_mem);
    void         (*memcpy)(void *dst, ucs_memory_type_t dst_mem_type,
                           const void *src, ucs_memory_type_t src_mem_type,
                           size_t count);
    void*        (*memset)(void *dst, int value, size_t count);
};

/* Aggregate state of one running benchmark: parameters, data buffers,
   measurement counters, and transport-specific (UCT/UCP) sub-state. */
struct ucx_perf_context {
    ucx_perf_params_t            params;

    /* Buffers */
    void                         *send_buffer;
    void                         *recv_buffer;

    /* Measurements */
    double                       start_time_acc;  /* accurate start time */
    ucs_time_t                   end_time;        /* inaccurate end time (upper bound) */
    ucs_time_t                   prev_time;       /* time of previous iteration */
    ucs_time_t                   report_interval; /* interval of showing report */
    ucx_perf_counter_t           max_iter;

    /* Measurements of current/previous **report** */
    struct {
        ucx_perf_counter_t       msgs;     /* number of messages */
        ucx_perf_counter_t       bytes;    /* number of bytes */
        ucx_perf_counter_t       iters;    /* number of iterations */
        ucs_time_t               time;     /* inaccurate time (for median and report interval) */
        double                   time_acc; /* accurate time (for avg latency/bw/msgrate) */
    } current, prev;

    /* ring buffer of per-iteration latencies, used for percentile reporting */
    ucs_time_t                   timing_queue[TIMING_QUEUE_SIZE];
    unsigned                     timing_queue_head;
    const ucx_perf_allocator_t   *allocator;

    union {
        struct {
            ucs_async_context_t    async;
            uct_component_h        cmpt;
            uct_md_h               md;
            uct_worker_h           worker;
            uct_iface_h            iface;
            uct_peer_t             *peers;
            uct_allocated_memory_t send_mem;
            uct_allocated_memory_t recv_mem;
            uct_iov_t              *iov;
        } uct;
        struct {
            ucp_context_h              context;
            ucx_perf_thread_context_t* tctx;
            ucp_worker_h               worker;
            ucp_ep_h                   ep;
            ucp_rkey_h                 rkey;
            unsigned long              remote_addr;
            ucp_mem_h                  send_memh;
            ucp_mem_h                  recv_memh;
            ucp_dt_iov_t               *send_iov;
            ucp_dt_iov_t               *recv_iov;
            void                       *am_hdr;
        } ucp;
    };
};

/* Per-thread benchmark state for multi-threaded runs. */
struct ucx_perf_thread_context {
    pthread_t           pt;
    int                 tid;
    ucs_status_t        status;
    ucx_perf_context_t  perf;
    ucx_perf_result_t   result;
};

/* Remote peer addressing/rkey info for UCT tests. */
struct uct_peer {
    uct_ep_h            ep;
    unsigned long       remote_addr;
    uct_rkey_bundle_t   rkey;
};

struct ucp_perf_request {
    void                *context;
};

/* Dispatch table implemented once per API (UCT / UCP). */
typedef struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
    void         (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs_t;

extern ucx_perf_funcs_t ucx_perf_funcs[];

unsigned rte_peer_index(unsigned group_size, unsigned group_index);

void ucx_perf_test_start_clock(ucx_perf_context_t *perf);

void uct_perf_ep_flush_b(ucx_perf_context_t *perf, int peer_index);

void uct_perf_iface_flush_b(ucx_perf_context_t *perf);

ucs_status_t uct_perf_test_dispatch(ucx_perf_context_t *perf);

ucs_status_t ucp_perf_test_dispatch(ucx_perf_context_t *perf);

void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result);

void
uct_perf_barrier(ucx_perf_context_t *perf);

void ucp_perf_thread_barrier(ucx_perf_context_t *perf);

void ucp_perf_barrier(ucx_perf_context_t *perf);

ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf);

void ucp_perf_test_free_mem(ucx_perf_context_t *perf);

ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf);

void uct_perf_test_free_mem(ucx_perf_context_t *perf);

ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                   ucx_perf_result_t* result);

void ucx_perf_test_prepare_new_run(ucx_perf_context_t *perf,
                                   const ucx_perf_params_t *params);

void ucx_perf_set_warmup(ucx_perf_context_t* perf,
                         const ucx_perf_params_t* params);

/**
 * Get the total length of the message size given by parameters
 */
size_t ucx_perf_get_message_size(const ucx_perf_params_t *params);

/* True when the test has consumed its iteration budget or passed its
   (upper-bound) end time. */
static UCS_F_ALWAYS_INLINE int ucx_perf_context_done(ucx_perf_context_t *perf)
{
    return ucs_unlikely((perf->current.iters >= perf->max_iter) ||
                        (perf->current.time > perf->end_time));
}

/* Sample the accurate clock into the current measurement snapshot. */
static inline void ucx_perf_get_time(ucx_perf_context_t *perf)
{
    perf->current.time_acc = ucs_get_accurate_time();
}

/* Synchronize benchmark threads; a no-op for single-threaded runs or
   non-OpenMP builds. */
static inline void ucx_perf_omp_barrier(ucx_perf_context_t *perf)
{
#if _OPENMP
    if (perf->params.thread_count > 1) {
#pragma omp barrier
    }
#endif
}

/* Account for completed work: bump the counters, record the iteration
   latency in the timing ring, and emit an intermediate report once the
   report interval has elapsed. */
static inline void ucx_perf_update(ucx_perf_context_t *perf,
                                   ucx_perf_counter_t iters, size_t bytes)
{
    ucx_perf_result_t result;

    perf->current.time   = ucs_get_time();
    perf->current.iters += iters;
    perf->current.bytes += bytes;
    perf->current.msgs  += 1;

    perf->timing_queue[perf->timing_queue_head] =
                    perf->current.time - perf->prev_time;
    ++perf->timing_queue_head;
    if (perf->timing_queue_head == TIMING_QUEUE_SIZE) {
        perf->timing_queue_head = 0; /* wrap the ring buffer */
    }
    perf->prev_time = perf->current.time;

    if (perf->current.time - perf->prev.time >= perf->report_interval) {
        ucx_perf_get_time(perf);
        ucx_perf_calc_result(perf, &result);
        rte_call(perf, report, &result, perf->params.report_arg, 0, 0);

        perf->prev = perf->current; /* start a new reporting window */
    }
}

END_C_DECLS

#endif
JeeIOrbitalSoA.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//////////////////////////////////////////////////////////////////////////////////////

#ifndef QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#if !defined(QMC_BUILD_SANDBOX_ONLY)
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#endif
#include "Particle/DistanceTableData.h"
#include <simd/allocator.hpp>
#include <simd/algorithm.hpp>
#include <map>
#include <numeric>

namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
 * @brief Specialization for three-body Jastrow function using multiple functors
 *
 * Each pair-type can have distinct function \f$u(r_{ij})\f$.
 * For electrons, distinct pair correlation functions are used
 * for spins up-up/down-down and up-down/down-up.
 */
template<class FT>
class JeeIOrbitalSoA : public WaveFunctionComponent
{
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using RowContainer = DistanceTableData::RowContainer;
  ///table index for el-el
  const int ee_Table_ID_;
  ///table index for i-el
  const int ei_Table_ID_;
  ///number of particles
  int Nelec, Nion;
  ///number of particles + padded
  size_t Nelec_padded;
  ///number of groups of the target particleset
  int eGroups, iGroups;
  ///reference to the sources (ions)
  const ParticleSet& Ions;
  ///diff value
  RealType DiffVal;
  ///\f$Uat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Uat, oldUk, newUk;
  ///\f$dUat[i] = sum_(j) du_{i,j}\f$
  using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;
  gContainer_type dUat, olddUk, newdUk;
  ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
  Vector<valT> d2Uat, oldd2Uk, newd2Uk;
  /// current values during PbyP
  valT cur_Uat, cur_d2Uat;
  posT cur_dUat, dUat_temp;
  ///container for the Jastrow functions, indexed (ion group, e group, e group)
  Array<FT*, 3> F;

  std::map<std::string, FT*> J3Unique;
  //YYYY
  std::map<FT*, int> J3UniqueIndex;

  /// the cutoff for e-I pairs
  std::vector<valT> Ion_cutoff;
  /// the electrons around ions within the cutoff radius, grouped by species
  Array<std::vector<int>, 2> elecs_inside;
  Array<std::vector<valT>, 2> elecs_inside_dist;
  Array<std::vector<posT>, 2> elecs_inside_displ;
  /// the ids of ions within the cutoff radius of an electron on which a move is proposed
  std::vector<int> ions_nearby_old, ions_nearby_new;

  /// work buffer size
  size_t Nbuffer;
  /// compressed distances
  aligned_vector<valT> Distjk_Compressed, DistkI_Compressed, DistjI_Compressed;
  std::vector<int> DistIndice_k;
  /// compressed displacements
  gContainer_type Disp_jk_Compressed, Disp_jI_Compressed, Disp_kI_Compressed;
  /// work result buffer
  VectorSoaContainer<valT, 9> mVGL;

  // Used for evaluating derivatives with respect to the parameters
  int NumVars;
  Array<std::pair<int, int>, 3> VarOffset;
  Vector<RealType> dLogPsi;
  Array<PosType, 2> gradLogPsi;
  Array<RealType, 2> lapLogPsi;

  // Temporary store for parameter derivatives of functor
  // The first index is the functor index in J3Unique. The second is the parameter index w.r.t. to that
  // functor
  std::vector<std::vector<RealType>> du_dalpha;
  std::vector<std::vector<PosType>> dgrad_dalpha;
  std::vector<std::vector<Tensor<RealType, 3>>> dhess_dalpha;

public:
  ///alias FuncType
  using FuncType = FT;

  JeeIOrbitalSoA(const ParticleSet& ions, ParticleSet& elecs, bool is_master = false)
      : ee_Table_ID_(elecs.addTable(elecs, DT_SOA)),
        ei_Table_ID_(elecs.addTable(ions, DT_SOA, true)),
        Ions(ions),
        NumVars(0)
  {
    ClassName = "JeeIOrbitalSoA";
    init(elecs);
  }

  ~JeeIOrbitalSoA() {}

  /// deep-copy this component, cloning each unique functor exactly once
  WaveFunctionComponentPtr makeClone(ParticleSet& elecs) const
  {
    JeeIOrbitalSoA<FT>* eeIcopy = new JeeIOrbitalSoA<FT>(Ions, elecs, false);
    std::map<const FT*, FT*> fcmap;
    for (int iG = 0; iG < iGroups; iG++)
      for (int eG1 = 0; eG1 < eGroups; eG1++)
        for (int eG2 = 0; eG2 < eGroups; eG2++)
        {
          if (F(iG, eG1, eG2) == 0)
            continue;
          typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F(iG, eG1, eG2));
          if (fit == fcmap.end())
          {
            FT* fc = new FT(*F(iG, eG1, eG2));
            eeIcopy->addFunc(iG, eG1, eG2, fc);
            fcmap[F(iG, eG1, eG2)] = fc;
          }
        }
    // Ye: I don't like the following memory allocated by default.
    eeIcopy->myVars.clear();
    eeIcopy->myVars.insertFrom(myVars);
    eeIcopy->NumVars = NumVars;
    eeIcopy->dLogPsi.resize(NumVars);
    eeIcopy->gradLogPsi.resize(NumVars, Nelec);
    eeIcopy->lapLogPsi.resize(NumVars, Nelec);
    eeIcopy->VarOffset    = VarOffset;
    eeIcopy->Optimizable  = Optimizable;
    return eeIcopy;
  }

  /// size all per-particle arrays and work buffers for particle set p
  void init(ParticleSet& p)
  {
    Nelec        = p.getTotalNum();
    Nelec_padded = getAlignedSize<valT>(Nelec);
    Nion         = Ions.getTotalNum();
    iGroups      = Ions.getSpeciesSet().getTotalNum();
    eGroups      = p.groups();
    Uat.resize(Nelec);
    dUat.resize(Nelec);
    d2Uat.resize(Nelec);
    oldUk.resize(Nelec);
    olddUk.resize(Nelec);
    oldd2Uk.resize(Nelec);
    newUk.resize(Nelec);
    newdUk.resize(Nelec);
    newd2Uk.resize(Nelec);
    F.resize(iGroups, eGroups, eGroups);
    F = nullptr;
    elecs_inside.resize(eGroups, Nion);
    elecs_inside_dist.resize(eGroups, Nion);
    elecs_inside_displ.resize(eGroups, Nion);
    ions_nearby_old.resize(Nion);
    ions_nearby_new.resize(Nion);
    Ion_cutoff.resize(Nion, 0.0);

    //initialize buffers
    Nbuffer = Nelec;
    mVGL.resize(Nbuffer);
    Distjk_Compressed.resize(Nbuffer);
    DistjI_Compressed.resize(Nbuffer);
    DistkI_Compressed.resize(Nbuffer);
    Disp_jk_Compressed.resize(Nbuffer);
    Disp_jI_Compressed.resize(Nbuffer);
    Disp_kI_Compressed.resize(Nbuffer);
    DistIndice_k.resize(Nbuffer);
  }

  /// (re)build the per-unique-functor parameter-derivative scratch arrays
  void initUnique()
  {
    typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
    du_dalpha.resize(J3Unique.size());
    dgrad_dalpha.resize(J3Unique.size());
    dhess_dalpha.resize(J3Unique.size());
    int ifunc = 0;
    while (it != it_end)
    {
      J3UniqueIndex[it->second] = ifunc;
      FT& functor               = *(it->second);
      int numParams             = functor.getNumParameters();
      du_dalpha[ifunc].resize(numParams);
      dgrad_dalpha[ifunc].resize(numParams);
      dhess_dalpha[ifunc].resize(numParams);
      ++it;
      ifunc++;
    }
  }

  /// register functor j for ion species iSpecies and electron species pair (eSpecies1, eSpecies2)
  void addFunc(int iSpecies, int eSpecies1, int eSpecies2, FT* j)
  {
    if (eSpecies1 == eSpecies2)
    {
      //if only up-up is specified, assume spin-unpolarized correlations
      if (eSpecies1 == 0)
        for (int eG1 = 0; eG1 < eGroups; eG1++)
          for (int eG2 = 0; eG2 < eGroups; eG2++)
          {
            if (F(iSpecies, eG1, eG2) == 0)
              F(iSpecies, eG1, eG2) = j;
          }
    }
    else
    {
      F(iSpecies, eSpecies1, eSpecies2) = j;
      F(iSpecies, eSpecies2, eSpecies1) = j;
    }
    if (j)
    {
      // e-I cutoff is half the functor radius (e-e-I triangle constraint)
      RealType rcut = 0.5 * j->cutoff_radius;
      for (int i = 0; i < Nion; i++)
        if (Ions.GroupID[i] == iSpecies)
          Ion_cutoff[i] = rcut;
    }
    else
    {
      APP_ABORT("JeeIOrbitalSoA::addFunc Jastrow function pointer is NULL");
    }
    std::stringstream aname;
    aname << iSpecies << "_" << eSpecies1 << "_" << eSpecies2;
    J3Unique[aname.str()] = j;
    initUnique();
  }

  /** check that correlation information is complete
   */
  void check_complete()
  {
    //check that correlation pointers are either all 0 or all assigned
    bool complete = true;
    for (int i = 0; i < iGroups; ++i)
    {
      int nfilled = 0;
      bool partial;
      for (int e1 = 0; e1 < eGroups; ++e1)
        for (int e2 = 0; e2 < eGroups; ++e2)
          if (F(i, e1, e2) != 0)
            nfilled++;
      partial = nfilled > 0 && nfilled < eGroups * eGroups;
      if (partial)
        app_log() << "J3 eeI is missing correlation for ion " << i << std::endl;
      complete = complete && !partial;
    }
    if (!complete)
    {
      APP_ABORT("JeeIOrbitalSoA::check_complete J3 eeI is missing correlation components\n see preceding messages "
                "for details");
    }
    //first set radii
    for (int i = 0; i < Nion; ++i)
    {
      FT* f = F(Ions.GroupID[i], 0, 0);
      if (f != 0)
        Ion_cutoff[i] = .5 * f->cutoff_radius;
    }
    //then check radii
    bool all_radii_match = true;
    for (int i = 0; i < iGroups; ++i)
    {
      if (F(i, 0, 0) != 0)
      {
        bool radii_match = true;
        RealType rcut    = F(i, 0, 0)->cutoff_radius;
        for (int e1 = 0; e1 < eGroups; ++e1)
          for (int e2 = 0; e2 < eGroups; ++e2)
            radii_match = radii_match && F(i, e1, e2)->cutoff_radius == rcut;
        if (!radii_match)
          app_log() << "eeI functors for ion species " << i << " have different radii" << std::endl;
        all_radii_match = all_radii_match && radii_match;
      }
    }
    if (!all_radii_match)
    {
      APP_ABORT("JeeIOrbitalSoA::check_radii J3 eeI are inconsistent for some ion species\n see preceding messages "
                "for details");
    }
  }

  //evaluate the distance table with els
  void
resetTargetParticleSet(ParticleSet& P) {}

  /** check in an optimizable parameter
   * @param active a super set of optimizable variables
   */
  void checkInVariables(opt_variables_type& active)
  {
    myVars.clear();
    typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
    while (it != it_end)
    {
      (*it).second->checkInVariables(active);
      (*it).second->checkInVariables(myVars);
      ++it;
    }
  }

  /** check out optimizable variables
   */
  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.clear();
    typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
    while (it != it_end)
    {
      (*it).second->myVars.getIndex(active);
      myVars.insertFrom((*it).second->myVars);
      ++it;
    }
    myVars.getIndex(active);
    NumVars = myVars.size();
    if (NumVars)
    {
      dLogPsi.resize(NumVars);
      gradLogPsi.resize(NumVars, Nelec);
      lapLogPsi.resize(NumVars, Nelec);
      VarOffset.resize(iGroups, eGroups, eGroups);
      // per-functor [first, second) offsets into the global variable list
      int varoffset = myVars.Index[0];
      for (int ig = 0; ig < iGroups; ig++)
        for (int jg = 0; jg < eGroups; jg++)
          for (int kg = 0; kg < eGroups; kg++)
          {
            FT* func_ijk = F(ig, jg, kg);
            if (func_ijk == nullptr)
              continue;
            VarOffset(ig, jg, kg).first  = func_ijk->myVars.Index.front() - varoffset;
            VarOffset(ig, jg, kg).second = func_ijk->myVars.Index.size() + VarOffset(ig, jg, kg).first;
          }
    }
  }

  ///reset the value of all the unique Two-Body Jastrow functions
  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
    while (it != it_end)
    {
      (*it++).second->resetParameters(active);
    }
    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
  }

  /** print the state, e.g., optimizables */
  void reportStatus(std::ostream& os)
  {
    typename std::map<std::string, FT*>::iterator it(J3Unique.begin()), it_end(J3Unique.end());
    while (it != it_end)
    {
      (*it).second->myVars.print(os);
      ++it;
    }
  }

  /// rebuild the per-ion lists of electrons lying within each ion's cutoff
  void build_compact_list(ParticleSet& P)
  {
    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);

    for (int iat = 0; iat < Nion; ++iat)
      for (int jg = 0; jg < eGroups; ++jg)
      {
        elecs_inside(jg, iat).clear();
        elecs_inside_dist(jg, iat).clear();
        elecs_inside_displ(jg, iat).clear();
      }

    for (int jg = 0; jg < eGroups; ++jg)
      for (int jel = P.first(jg); jel < P.last(jg); jel++)
        for (int iat = 0; iat < Nion; ++iat)
          if (eI_table.Distances[jel][iat] < Ion_cutoff[iat])
          {
            elecs_inside(jg, iat).push_back(jel);
            elecs_inside_dist(jg, iat).push_back(eI_table.Distances[jel][iat]);
            elecs_inside_displ(jg, iat).push_back(eI_table.Displacements[jel][iat]);
          }
  }

  LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L)
  {
    evaluateGL(P, G, L, true);
    return LogValue;
  }

  /// ratio of new to old wavefunction value for a proposed move of electron iat
  PsiValueType ratio(ParticleSet& P, int iat)
  {
    UpdateMode = ORB_PBYP_RATIO;

    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
    cur_Uat = computeU(P, iat, P.GroupID[iat], eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new);
    DiffVal = Uat[iat] - cur_Uat;
    return std::exp(static_cast<PsiValueType>(DiffVal));
  }

  void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    for (int k = 0; k < ratios.size(); ++k)
      ratios[k] = std::exp(Uat[VP.refPtcl] -
                           computeU(VP.refPS, VP.refPtcl, VP.refPS.GroupID[VP.refPtcl],
                                    VP.getDistTable(ei_Table_ID_).Distances[k],
                                    VP.getDistTable(ee_Table_ID_).Distances[k], ions_nearby_old));
  }

  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
  {
    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);

    for (int jg = 0; jg < eGroups; ++jg)
    {
      const valT sumU = computeU(P, -1, jg, eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new);

      for (int j = P.first(jg); j < P.last(jg); ++j)
      {
        // remove self-interaction
        valT Uself(0);
        for (int iat = 0; iat < Nion; ++iat)
        {
          const valT&
r_Ij = eI_table.Temp_r[iat];
          const valT& r_Ik = eI_table.Distances[j][iat];
          if (r_Ij < Ion_cutoff[iat] && r_Ik < Ion_cutoff[iat])
          {
            const int ig = Ions.GroupID[iat];
            Uself += F(ig, jg, jg)->evaluate(ee_table.Temp_r[j], r_Ij, r_Ik);
          }
        }
        ratios[j] = std::exp(Uat[j] + Uself - sumU);
      }
    }
  }

  GradType evalGrad(ParticleSet& P, int iat) { return GradType(dUat[iat]); }

  /// ratio for a proposed move of electron iat, also accumulating its gradient
  PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
  {
    UpdateMode = ORB_PBYP_PARTIAL;

    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
    computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr,
              cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new);
    DiffVal = Uat[iat] - cur_Uat;
    grad_iat += cur_dUat;
    return std::exp(static_cast<PsiValueType>(DiffVal));
  }

  inline void restore(int iat) {}

  /// accept the proposed move of electron iat: fold the new U/dU/d2U into the
  /// per-electron accumulators and update the compact neighbor lists
  void acceptMove(ParticleSet& P, int iat)
  {
    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);
    // get the old value, grad, lapl
    computeU3(P, iat, eI_table.Distances[iat], eI_table.Displacements[iat], ee_table.Distances[iat],
              ee_table.Displacements[iat], Uat[iat], dUat_temp, d2Uat[iat], oldUk, olddUk, oldd2Uk,
              ions_nearby_old);
    if (UpdateMode == ORB_PBYP_RATIO)
    { //ratio-only during the move; need to compute derivatives
      computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr,
                cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new);
    }

#pragma omp simd
    for (int jel = 0; jel < Nelec; jel++)
    {
      Uat[jel] += newUk[jel] - oldUk[jel];
      d2Uat[jel] += newd2Uk[jel] - oldd2Uk[jel];
    }
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      valT* restrict save_g      = dUat.data(idim);
      const valT* restrict new_g = newdUk.data(idim);
      const valT* restrict old_g = olddUk.data(idim);
#pragma omp simd aligned(save_g, new_g, old_g)
      for (int jel = 0; jel < Nelec; jel++)
        save_g[jel] += new_g[jel] - old_g[jel];
    }

    LogValue += Uat[iat] - cur_Uat;
    Uat[iat]   = cur_Uat;
    dUat(iat)  = cur_dUat;
    d2Uat[iat] = cur_d2Uat;

    const int ig = P.GroupID[iat];
    // update compact list elecs_inside
    // if the old position exists in elecs_inside
    for (int iind = 0; iind < ions_nearby_old.size(); iind++)
    {
      int jat         = ions_nearby_old[iind];
      auto iter       = std::find(elecs_inside(ig, jat).begin(), elecs_inside(ig, jat).end(), iat);
      auto iter_dist  = elecs_inside_dist(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter);
      auto iter_displ = elecs_inside_displ(ig, jat).begin() + std::distance(elecs_inside(ig, jat).begin(), iter);
      if (eI_table.Temp_r[jat] < Ion_cutoff[jat]) // the new position is still inside
      {
        *iter_dist  = eI_table.Temp_r[jat];
        *iter_displ = eI_table.Temp_dr[jat];
        // mark the ion handled so the insertion loop below skips it
        *std::find(ions_nearby_new.begin(), ions_nearby_new.end(), jat) = -1;
      }
      else
      {
        // swap-and-pop removal from all three parallel lists
        *iter = elecs_inside(ig, jat).back();
        elecs_inside(ig, jat).pop_back();
        *iter_dist = elecs_inside_dist(ig, jat).back();
        elecs_inside_dist(ig, jat).pop_back();
        *iter_displ = elecs_inside_displ(ig, jat).back();
        elecs_inside_displ(ig, jat).pop_back();
      }
    }

    // if the old position doesn't exist in elecs_inside but the new position does
    for (int iind = 0; iind < ions_nearby_new.size(); iind++)
    {
      int jat = ions_nearby_new[iind];
      if (jat >= 0)
      {
        elecs_inside(ig, jat).push_back(iat);
        elecs_inside_dist(ig, jat).push_back(eI_table.Temp_r[jat]);
        elecs_inside_displ(ig, jat).push_back(eI_table.Temp_dr[jat]);
      }
    }
  }

  /// recompute all per-electron values from scratch
  inline void recompute(ParticleSet& P)
  {
    const DistanceTableData& eI_table = P.getDistTable(ei_Table_ID_);
    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);

    build_compact_list(P);

    for (int jel = 0; jel < Nelec; ++jel)
    {
      computeU3(P, jel, eI_table.Distances[jel], eI_table.Displacements[jel], ee_table.Distances[jel],
                ee_table.Displacements[jel], Uat[jel], dUat_temp, d2Uat[jel], newUk, newdUk, newd2Uk,
                ions_nearby_new, true);
      dUat(jel) = dUat_temp;
      // add the contribution from the upper triangle
#pragma omp simd for (int kel = 0; kel < jel; kel++) { Uat[kel] += newUk[kel]; d2Uat[kel] += newd2Uk[kel]; } for (int idim = 0; idim < OHMMS_DIM; ++idim) { valT* restrict save_g = dUat.data(idim); const valT* restrict new_g = newdUk.data(idim); #pragma omp simd aligned(save_g, new_g) for (int kel = 0; kel < jel; kel++) save_g[kel] += new_g[kel]; } } } inline valT computeU(const ParticleSet& P, int jel, int jg, const RealType* distjI, const RealType* distjk, std::vector<int>& ions_nearby) { ions_nearby.clear(); for (int iat = 0; iat < Nion; ++iat) if (distjI[iat] < Ion_cutoff[iat]) ions_nearby.push_back(iat); valT Uj = valT(0); for (int kg = 0; kg < eGroups; ++kg) { int kel_counter = 0; for (int iind = 0; iind < ions_nearby.size(); ++iind) { const int iat = ions_nearby[iind]; const int ig = Ions.GroupID[iat]; const valT r_jI = distjI[iat]; for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++) { const int kel = elecs_inside(kg, iat)[kind]; if (kel != jel) { DistkI_Compressed[kel_counter] = elecs_inside_dist(kg, iat)[kind]; Distjk_Compressed[kel_counter] = distjk[kel]; DistjI_Compressed[kel_counter] = r_jI; kel_counter++; if (kel_counter == Nbuffer) { const FT& feeI(*F(ig, jg, kg)); Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data()); kel_counter = 0; } } } if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0) { const FT& feeI(*F(ig, jg, kg)); Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data()); kel_counter = 0; } } } return Uj; } inline void computeU3_engine(const ParticleSet& P, const FT& feeI, int kel_counter, valT& Uj, posT& dUj, valT& d2Uj, Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk) { constexpr valT czero(0); constexpr valT cone(1); constexpr valT ctwo(2); constexpr valT lapfac = OHMMS_DIM - cone; valT* restrict val = mVGL.data(0); valT* restrict gradF0 = mVGL.data(1); 
  valT* restrict gradF1  = mVGL.data(2);
  valT* restrict gradF2  = mVGL.data(3);
  valT* restrict hessF00 = mVGL.data(4);
  valT* restrict hessF11 = mVGL.data(5);
  valT* restrict hessF22 = mVGL.data(6);
  valT* restrict hessF01 = mVGL.data(7);
  valT* restrict hessF02 = mVGL.data(8);

  feeI.evaluateVGL(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data(), val,
                   gradF0, gradF1, gradF2, hessF00, hessF11, hessF22, hessF01, hessF02);
  // compute the contribution to jel, kel
  Uj               = simd::accumulate_n(val, kel_counter, Uj);
  valT gradF0_sum  = simd::accumulate_n(gradF0, kel_counter, czero);
  valT gradF1_sum  = simd::accumulate_n(gradF1, kel_counter, czero);
  valT hessF00_sum = simd::accumulate_n(hessF00, kel_counter, czero);
  valT hessF11_sum = simd::accumulate_n(hessF11, kel_counter, czero);
  d2Uj -= hessF00_sum + hessF11_sum + lapfac * (gradF0_sum + gradF1_sum);
  // hessF11 is zeroed here and recycled below as a dot-product accumulator.
  std::fill_n(hessF11, kel_counter, czero);
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    valT* restrict jk = Disp_jk_Compressed.data(idim);
    valT* restrict jI = Disp_jI_Compressed.data(idim);
    valT* restrict kI = Disp_kI_Compressed.data(idim);
    valT dUj_x(0);
#pragma omp simd aligned(gradF0, gradF1, gradF2, hessF11, jk, jI, kI) reduction(+ : dUj_x)
    for (int kel_index = 0; kel_index < kel_counter; kel_index++)
    {
      // recycle hessF11
      hessF11[kel_index] += kI[kel_index] * jk[kel_index];
      dUj_x += gradF1[kel_index] * jI[kel_index];
      // destroy jk, kI
      // NOTE: jk and kI are deliberately clobbered here; from this point they
      // hold intermediate products, not displacements.
      const valT temp = jk[kel_index] * gradF0[kel_index];
      dUj_x += temp;
      jk[kel_index] *= jI[kel_index];
      kI[kel_index] = kI[kel_index] * gradF2[kel_index] - temp;
    }
    dUj[idim] += dUj_x;

    // Accumulate the per-dimension products into the idim==0 plane so the
    // hessF01 cross term below needs only one pass.
    valT* restrict jk0 = Disp_jk_Compressed.data(0);
    if (idim > 0)
    {
#pragma omp simd aligned(jk, jk0)
      for (int kel_index = 0; kel_index < kel_counter; kel_index++)
        jk0[kel_index] += jk[kel_index];
    }

    // Scatter the kel-side gradient contributions back to their electrons.
    valT* restrict dUk_x = dUk.data(idim);
    for (int kel_index = 0; kel_index < kel_counter; kel_index++)
      dUk_x[DistIndice_k[kel_index]] += kI[kel_index];
  }

  valT sum(0);
  valT* restrict jk0 = Disp_jk_Compressed.data(0);
#pragma omp simd aligned(jk0, hessF01) reduction(+ : sum)
  for (int kel_index = 0; kel_index < kel_counter; kel_index++)
    sum += hessF01[kel_index] * jk0[kel_index];
  d2Uj -= ctwo * sum;

  // hessF00 is recycled to hold the per-triplet kel Laplacian contribution;
  // hessF11 here carries the dot(disp_kI, disp_jk) accumulated above.
#pragma omp simd aligned(hessF00, hessF22, gradF0, gradF2, hessF02, hessF11)
  for (int kel_index = 0; kel_index < kel_counter; kel_index++)
    hessF00[kel_index] = hessF00[kel_index] + hessF22[kel_index] +
        lapfac * (gradF0[kel_index] + gradF2[kel_index]) -
        ctwo * hessF02[kel_index] * hessF11[kel_index];

  for (int kel_index = 0; kel_index < kel_counter; kel_index++)
  {
    const int kel = DistIndice_k[kel_index];
    Uk[kel] += val[kel_index];
    d2Uk[kel] -= hessF00[kel_index];
  }
}

// Evaluate value, gradient and Laplacian of electron jel's three-body term,
// plus the matching contributions to its partner electrons kel (returned in
// Uk/dUk/d2Uk). With triangle=true only partners kel < jel are visited
// (used by recompute to cover each pair once).
inline void computeU3(const ParticleSet& P,
                      int jel,
                      const RealType* distjI,
                      const RowContainer& displjI,
                      const RealType* distjk,
                      const RowContainer& displjk,
                      valT& Uj,
                      posT& dUj,
                      valT& d2Uj,
                      Vector<valT>& Uk,
                      gContainer_type& dUk,
                      Vector<valT>& d2Uk,
                      std::vector<int>& ions_nearby,
                      bool triangle = false)
{
  constexpr valT czero(0);

  Uj   = czero;
  dUj  = posT();
  d2Uj = czero;

  const int jg = P.GroupID[jel];

  const int kelmax = triangle ? jel : Nelec;
  std::fill_n(Uk.data(), kelmax, czero);
  std::fill_n(d2Uk.data(), kelmax, czero);
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
    std::fill_n(dUk.data(idim), kelmax, czero);

  // Collect the ions whose cutoff sphere contains electron jel.
  ions_nearby.clear();
  for (int iat = 0; iat < Nion; ++iat)
    if (distjI[iat] < Ion_cutoff[iat])
      ions_nearby.push_back(iat);

  for (int kg = 0; kg < eGroups; ++kg)
  {
    int kel_counter = 0;
    for (int iind = 0; iind < ions_nearby.size(); ++iind)
    {
      const int iat      = ions_nearby[iind];
      const int ig       = Ions.GroupID[iat];
      const valT r_jI    = distjI[iat];
      const posT disp_Ij = displjI[iat];
      for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++)
      {
        const int kel = elecs_inside(kg, iat)[kind];
        if (kel < kelmax && kel != jel)
        {
          // Pack the triplet into the compressed scratch buffers.
          DistkI_Compressed[kel_counter]  = elecs_inside_dist(kg, iat)[kind];
          DistjI_Compressed[kel_counter]  = r_jI;
          Distjk_Compressed[kel_counter]  = distjk[kel];
          Disp_kI_Compressed(kel_counter) = elecs_inside_displ(kg, iat)[kind];
          Disp_jI_Compressed(kel_counter) = disp_Ij;
          Disp_jk_Compressed(kel_counter) = displjk[kel];
          DistIndice_k[kel_counter]       = kel;
          kel_counter++;
          if (kel_counter == Nbuffer)
          {
            // Scratch buffers full: flush to the batched VGL kernel.
            const FT& feeI(*F(ig, jg, kg));
            computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk);
            kel_counter = 0;
          }
        }
      }
      // Flush at the end of the ion list or when the next ion belongs to a
      // different species (different functor).
      if ((iind + 1 == ions_nearby.size() || ig != Ions.GroupID[ions_nearby[iind + 1]]) && kel_counter > 0)
      {
        const FT& feeI(*F(ig, jg, kg));
        computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk);
        kel_counter = 0;
      }
    }
  }
}

// Register Uat/dUat/d2Uat in the walker buffer. On first call the data is
// copied in and the local storage is released (the arrays are re-attached to
// buffer memory in copyFromBuffer); later calls just advance the cursor.
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
  if (Bytes_in_WFBuffer == 0)
  {
    Bytes_in_WFBuffer = buf.current();
    buf.add(Uat.begin(), Uat.end());
    buf.add(dUat.data(), dUat.end());
    buf.add(d2Uat.begin(), d2Uat.end());
    Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
    // free local space
    Uat.free();
    dUat.free();
    d2Uat.free();
  }
  else
  {
    buf.forward(Bytes_in_WFBuffer);
  }
}

// Refresh G/L from the current per-electron tables and skip the buffer
// cursor past this component's data (which lives in-place in the buffer).
inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
{
  evaluateGL(P, P.G, P.L, false);
  buf.forward(Bytes_in_WFBuffer);
  return LogValue;
}

// Re-attach Uat/dUat/d2Uat to the walker-buffer storage and rebuild the
// per-(group, ion) compact electron lists for the restored configuration.
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
  Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
  dUat.attachReference(Nelec, Nelec_padded, buf.lendReference<valT>(Nelec_padded * OHMMS_DIM));
  d2Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
  build_compact_list(P);
}

// Accumulate this component's gradient and Laplacian into G and L from the
// cached per-electron tables (recomputed first when fromscratch is set) and
// refresh LogValue = -0.5 * sum_i U_i.
void evaluateGL(ParticleSet& P,
                ParticleSet::ParticleGradient_t& G,
                ParticleSet::ParticleLaplacian_t& L,
                bool fromscratch = false)
{
  if (fromscratch)
    recompute(P);
  LogValue = valT(0);
  for (int iat = 0; iat < Nelec; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }
  LogValue = -LogValue * 0.5;
}

// Parameter derivatives of log(psi) and of the local-energy term for the
// optimizable variables of the three-body functors. Loops over each ion and
// each unordered electron pair (kel < jel) inside its cutoff.
void evaluateDerivatives(ParticleSet& P,
                         const opt_variables_type& optvars,
                         std::vector<ValueType>& dlogpsi,
                         std::vector<ValueType>& dhpsioverpsi)
{
  bool recalculate(false);
  std::vector<bool> rcsingles(myVars.size(), false);
  for (int k = 0; k < myVars.size(); ++k)
  {
    int kk = myVars.where(k);
    if (kk < 0)
      continue;
    if (optvars.recompute(kk))
      recalculate = true;
    // NOTE(review): set for every active variable but never read in this
    // function — looks vestigial; confirm against sibling implementations.
    rcsingles[k] = true;
  }
  if (recalculate)
  {
    constexpr valT czero(0);
    constexpr valT cone(1);
    constexpr valT cminus(-1);
    constexpr valT ctwo(2);
    constexpr valT lapfac = OHMMS_DIM - cone;

    const DistanceTableData& ee_table = P.getDistTable(ee_Table_ID_);

    build_compact_list(P);

    dLogPsi    = czero;
    gradLogPsi = PosType();
    lapLogPsi  = czero;
    for (int iat = 0; iat < Nion; ++iat)
    {
      const int ig = Ions.GroupID[iat];
      for (int jg = 0; jg < eGroups; ++jg)
        for (int jind = 0; jind < elecs_inside(jg, iat).size(); jind++)
        {
          const int jel      = elecs_inside(jg, iat)[jind];
          const valT r_Ij    = elecs_inside_dist(jg, iat)[jind];
          // cached displacements point ion->electron is presumed; the sign is
          // flipped here — TODO confirm the stored displacement convention.
          const posT disp_Ij  = cminus * elecs_inside_displ(jg, iat)[jind];
          const valT r_Ij_inv = cone / r_Ij;
          for (int kg = 0; kg < eGroups; ++kg)
            for (int kind = 0; kind < elecs_inside(kg, iat).size(); kind++)
            {
              const int kel = elecs_inside(kg, iat)[kind];
              if (kel < jel) // visit each unordered pair exactly once
              {
                const valT r_Ik     = elecs_inside_dist(kg, iat)[kind];
                const posT disp_Ik  = cminus * elecs_inside_displ(kg, iat)[kind];
                const valT r_Ik_inv = cone / r_Ik;

                const valT r_jk     = ee_table.Distances[jel][kel];
                const posT disp_jk  = ee_table.Displacements[jel][kel];
                const valT r_jk_inv = cone / r_jk;

                FT& func = *F(ig, jg, kg);
                int idx  = J3UniqueIndex[F(ig, jg, kg)];
                func.evaluateDerivatives(r_jk, r_Ij, r_Ik, du_dalpha[idx], dgrad_dalpha[idx], dhess_dalpha[idx]);
                // Parameter range [first, last) owned by this functor.
                int first = VarOffset(ig, jg, kg).first;
                int last  = VarOffset(ig, jg, kg).second;
                std::vector<RealType>& dlog             = du_dalpha[idx];
                std::vector<PosType>& dgrad             = dgrad_dalpha[idx];
                std::vector<Tensor<RealType, 3>>& dhess = dhess_dalpha[idx];
                for (int p = first, ip = 0; p < last; p++, ip++)
                {
                  RealType& dval          = dlog[ip];
                  PosType& dg             = dgrad[ip];
                  Tensor<RealType, 3>& dh = dhess[ip];

                  // Convert radial derivatives to Cartesian weights.
                  dg[0] *= r_jk_inv;
                  dg[1] *= r_Ij_inv;
                  dg[2] *= r_Ik_inv;

                  PosType gr_ee = dg[0] * disp_jk;

                  gradLogPsi(p, jel) -= dg[1] * disp_Ij - gr_ee;
                  lapLogPsi(p, jel) -=
                      (dh(0, 0) + lapfac * dg[0] -
                       ctwo * dh(0, 1) * dot(disp_jk, disp_Ij) * r_jk_inv * r_Ij_inv + dh(1, 1) + lapfac * dg[1]);

                  gradLogPsi(p, kel) -= dg[2] * disp_Ik + gr_ee;
                  lapLogPsi(p, kel) -=
                      (dh(0, 0) + lapfac * dg[0] +
                       ctwo * dh(0, 2) * dot(disp_jk, disp_Ik) * r_jk_inv * r_Ik_inv + dh(2, 2) + lapfac * dg[2]);

                  dLogPsi[p] -= dval;
                }
              }
            }
        }
    }
    // Reduce per-electron derivative tables to dlogpsi and the kinetic
    // cross term for dhpsioverpsi.
    for (int k = 0; k < myVars.size(); ++k)
    {
      int kk = myVars.where(k);
      if (kk < 0)
        continue;
      dlogpsi[kk] = (ValueType)dLogPsi[k];
      RealType sum = 0.0;
      for (int i = 0; i < Nelec; i++)
      {
#if defined(QMC_COMPLEX)
        sum -= 0.5 * lapLogPsi(k, i);
        for (int jdim = 0; jdim < OHMMS_DIM; ++jdim)
          sum -= P.G[i][jdim].real() * gradLogPsi(k, i)[jdim];
#else
        sum -= 0.5 * lapLogPsi(k, i) + dot(P.G[i], gradLogPsi(k, i));
#endif
      }
      dhpsioverpsi[kk] = (ValueType)sum;
    }
  }
}
};

} // namespace qmcplusplus
#endif
draw.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                        DDDD   RRRR    AAA   W   W                           %
%                        D   D  R   R  A   A  W   W                           %
%                        D   D  RRRR   AAAAA  W W W                           %
%                        D   D  R R    A   A  WW WW                           %
%                        DDDD   R  R   A   A  W   W                           %
%                                                                             %
%                                                                             %
%                     MagickCore Image Drawing Methods                        %
%                                                                             %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1998                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
%  rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
%  Graphics Gems, 1990.  Leonard Rosenthal and David Harr of Appligent
%  (www.appligent.com) contributed the dash pattern, linecap stroking
%  algorithm, and minor rendering improvements.
%
*/

/*
  Include declarations.
*/ #include "magick/studio.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 128 #define MaxBezierCoordinates 4194304 #define ThrowPointExpectedException(image,token) \ { \ (void) ThrowMagickException(&(image)->exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _MVGInfo { PrimitiveInfo **primitive_info; size_t *extent; ssize_t offset; PointInfo point; ExceptionInfo *exception; } MVGInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. */ static Image *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *, ExceptionInfo *); static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *), RenderMVGContent(Image *,const DrawInfo *,const size_t), TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(MVGInfo *,const size_t), TraceCircle(MVGInfo *,const PointInfo,const PointInfo), TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); static PrimitiveInfo *TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *); static size_t TracePath(Image *,MVGInfo *,const char *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireDrawInfo() returns a DrawInfo structure 
properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /*
    AcquireCriticalMemory() aborts on allocation failure, so draw_info is
    always valid here; GetDrawInfo() fills in the defaults.
  */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e D r a w I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.  The caller owns the returned structure and must release it with
%  DestroyDrawInfo().
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);  /* no source: return defaults */
  /*
    Deep-copy heap-allocated members; scalar members are copied by value.
  */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /*
    The deprecated tile image is folded into fill_pattern on clone.
  */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      &draw_info->fill_pattern->exception);
  else
    if (draw_info->tile != (Image *) NULL)
      clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue,
        &draw_info->tile->exception);
  clone_info->tile=NewImageList();  /* tile is deprecated */
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,&draw_info->stroke_pattern->exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is a zero-terminated array; x counts the entries
        before the terminator.  Four extra slots are allocated past the
        measured length.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (x+4),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+4)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
        /* NOTE(review): exception tag above looks copy-pasted from the
           dash-pattern branch — a gradient-stops-specific message would be
           clearer; confirm an appropriate tag exists in the locale tables. */
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_opacity=draw_info->fill_opacity;
  clone_info->stroke_opacity=draw_info->stroke_opacity;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,&draw_info->clipping_mask->exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,&draw_info->composite_mask->exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P a t h T o P o l y g o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) % % A description of each parameter follows: % % o Method ConvertPathToPolygon returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o draw_info: Specifies a pointer to an DrawInfo structure. % % o path_info: Specifies a pointer to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int DrawCompareEdges(const void *p_edge,const void *q_edge) { #define DrawCompareEdge(p,q) \ { \ if (((p)-(q)) < 0.0) \ return(-1); \ if (((p)-(q)) > 0.0) \ return(1); \ } register const PointInfo *p, *q; /* Edge sorting for right-handed coordinate system. */ p=((const EdgeInfo *) p_edge)->points; q=((const EdgeInfo *) q_edge)->points; DrawCompareEdge(p[0].y,q[0].y); DrawCompareEdge(p[0].x,q[0].x); DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)* (q[1].x-q[0].x)); DrawCompareEdge(p[1].y,q[1].y); DrawCompareEdge(p[1].x,q[1].x); return(0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { register EdgeInfo *p; register ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ? 
"transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; register ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return((PolygonInfo *) NULL); number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); (void) memset(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) memset(&point,0,sizeof(point)); (void) memset(&bounds,0,sizeof(bounds)); polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=0.0; polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) direction; polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->number_edges=0; for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || 
(path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. 
*/ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; 
      /*
        NOTE(review): tail of ConvertPathToPolygon(); the function's head
        precedes this chunk and is unchanged here.
      */
      polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
      if (direction < 0)
        ReversePoints(points,(size_t) n);
      polygon_info->edges[edge].points=points;
      polygon_info->edges[edge].bounds=bounds;
      polygon_info->edges[edge].bounds.y1=points[0].y;
      polygon_info->edges[edge].bounds.y2=points[n-1].y;
      ghostline=MagickFalse;
      edge++;
    }
  }
  polygon_info->number_edges=edge;
  /*
    Order the edge table (DrawCompareEdges defines the ordering) so the
    rasterizer can process edges deterministically.
  */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P r i m i t i v e T o P a t h                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPrimitiveToPath returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/

/*
  LogPathInfo() emits one DrawEvent log line per path point, decoding the
  path-code enum into a human-readable verb; purely diagnostic.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ?
      "lineto" : "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(
  const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point (for duplicate elimination) */

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  magick_unreferenced(draw_info);

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /*
      Non-path primitives have no vector-path representation.
    */
    case PointPrimitive:
    case ColorPrimitive:
    case MattePrimitive:
    case TextPrimitive:
    case ImagePrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Count the primitives; worst case each point expands to 3 path entries
    (point + ghostline pair) plus a terminating EndCode entry.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Trim the allocation to the entries actually written (n points plus the
    EndCode sentinel).
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every owned member (strings, images, dash/gradient arrays),
    invalidate the signature, then free the structure itself.  Always
    returns NULL so callers can write draw_info=DestroyDrawInfo(draw_info).
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->tile != (Image *) NULL)
    draw_info->tile=DestroyImage(draw_info->tile);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* invalidate the signature to catch use-after-free of the struct */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y E d g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyEdge() destroys the specified polygon edge.
%
%  The format of the DestroyEdge method is:
%
%      ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
%    o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  /*
    Free the point list of edge 'edge', then close the gap in the edge
    table with memmove; returns the new edge count.
  */
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P o l y g o n I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    i;

  /* free each edge's point list before the edge table itself */
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
    polygon_info->edges[i].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[i].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
*/

/*
  AffineEdge() clips one destination scan line (row y) against the affine
  projection of the source image, returning the usable x1..x2 span; if the
  span is empty, x2 < x1 on return.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* negative x-scale: the two intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* sx ~ 0 and the row misses the source: signal an empty span */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /* rx ~ 0 and the row misses the source: signal an empty span */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  InverseAffineMatrix() returns the inverse of the given 2x3 affine matrix;
  PerceptibleReciprocal() guards against a (near-)zero determinant.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    extent[4],  /* the four source corners, forward-transformed */
    min,
    max,
    point;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetMagickPixelPacket(image,&zero);
  exception=(&image->exception);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    MagickPixelPacket
      composite,
      pixel;

    PointInfo
      point;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* empty span on this scan line */
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5);
         x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* map the destination pixel back into source coordinates */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolateMagickPixelPacket(source,source_view,
        UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
      MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
        composite.opacity,&composite);
      SetPixelPacket(image,&composite,q,indexes+x_offset);
      x_offset++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w B o u n d i n g R e c t a n g l e s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/

/*
  SaneStrokeWidth() clamps the stroke width to a value proportional to the
  image diagonal so absurd widths cannot blow up the rasterizer.
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  return(MagickMin((double) draw_info->stroke_width,
    (2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows)));
}

static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info)
{
  double
    mid;  /* half the effective stroke width, used to pad rectangles */

  DrawInfo
    *clone_info;

  MagickBooleanType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* transparent fill: only outlines are drawn */
  status=QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(status);
    }
  resolution.x=96.0;  /* default DPI when no density is set */
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;  /* single-value density: square pixels */
    }
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounds, padded by 'mid' and clamped to the image.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* red for one winding direction, green for the other */
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorDatabase("#f00",&clone_info->stroke,
            &image->exception);
        else
          status=QueryColorDatabase("#0f0",&clone_info->stroke,
            &image->exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* a query or draw failed mid-loop: clean up and bail */
          clone_info=DestroyDrawInfo(clone_info);
          return(status);
        }
    }
  /* blue rectangle for the overall bounding box */
  status=QueryColorDatabase("#00f",&clone_info->stroke,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(status);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info);
  clone_info=DestroyDrawInfo(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /* look up the MVG clip-path definition stored as an image artifact */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,&image->exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageClipMask(image,clipping_mask);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p p i n g M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(clip_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /* detach any existing clip mask so it does not affect mask rendering */
  status=SetImageClipMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
  status=SetImageBackgroundColor(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  /* render the path white-on-transparent; alpha becomes the mask */
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,1);
  clone_info=DestroyDrawInfo(clone_info);
  status&=SeparateImageChannel(clip_mask,TrueAlphaChannel);
  status&=NegateImage(clip_mask,MagickFalse);
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C o m p o s i t e M a s k                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(composite_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /* detach any existing mask so it does not affect mask rendering */
  status=SetImageMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  /* render the path white-on-transparent; alpha becomes the mask */
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  status=RenderMVGContent(composite_mask,clone_info,1);
  clone_info=DestroyDrawInfo(clone_info);
  status&=SeparateImageChannel(composite_mask,TrueAlphaChannel);
  status&=NegateImage(composite_mask,MagickFalse);
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w D a s h P o l y g o n                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image)
{
  double
    length,          /* remaining length of the current dash segment */
    maximum_length,  /* length of the current polygon edge */
    offset,          /* dash offset still to be consumed */
    scale,
    total_length;    /* distance consumed along the current edge */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,  /* next write slot in dash_polygon */
    n;  /* index into the dash pattern; odd entries are gaps */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance through the pattern until the offset
    is exhausted, leaving n/length positioned mid-pattern.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;  /* pathologically long edge: give up on dashing */
    if (fabs(length) < MagickEpsilon)
      {
        /* current pattern entry exhausted exactly: advance (wrap at 0) */
        n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) &&
         (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* end of a gap: start a new dash at this interpolated point */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* end of a dash: close it out and stroke the accumulated polygon */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unfinished pattern entry over to the next edge */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;  /* inside a gap: emit nothing for this vertex */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      /*
        Flush the final partial dash.
      */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G r a d i e n t I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
*/

/*
  GetStopColorOffset() returns the (unnormalized, for linear) gradient
  offset of pixel (x,y): for linear gradients, the projection of the pixel
  onto the gradient vector; for radial gradients, the (possibly
  angle-corrected, radii-scaled) distance from the gradient center.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      /* scalar projection of q onto p (dot product over |p|) */
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* repeat spread uses the raw Euclidean distance */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate into the ellipse frame and normalize by the radii */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    MagickPixelPacket
      composite,
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      i,
      x;

    register PixelPacket
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);  /* normalize to [0,1] */
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* outside the gradient vector, clamp to the first/last stop */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* blend between the two stops bracketing 'offset' */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          /* mirror the gradient on every other repetition */
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  /* antialias the seam where one repetition meets the next */
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,(double) gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,
                      (double) gradient->radius);
                  else
                    repeat=fmod(offset,(double) gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* at the seam, blend first and last stops directly */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
        pixel.opacity,&pixel);
      SetPixelPacket(image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
*/

/*
  CheckPrimitiveExtent() ensures the shared primitive_info array can hold at
  least `pad' more entries past the current MVG offset, growing it (and
  zero-filling the new tail) when required.  If reallocation fails, the array
  is replaced by a minimal PrimitiveExtentPad-entry buffer so callers can
  unwind safely, an exception is recorded, and MagickFalse is returned.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  /*
    Guard the byte count in double precision before casting to size_t; this
    rejects requests that would overflow or exceed the memory policy.
  */
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=ResizeQuantumMemory(*mvg_info->primitive_info,
        (size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          /*
            Zero the newly acquired tail so uninitialized primitives are
            never interpreted.
          */
          (void) memset(*mvg_info->primitive_info+*mvg_info->extent,0,
            (extent-(*mvg_info->extent))*quantum);
          *mvg_info->extent=(size_t) extent;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=AcquireCriticalMemory(PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}

/*
  GetMVGMacros() scans an MVG stream for quoted "push" macro definitions
  (e.g. push graphic-context "wheel" ... pop graphic-context) and returns
  them in a splay-tree keyed by macro name, or NULL if primitive is NULL.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
*/
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              Track push/pop nesting depth so the macro ends at the matching
              "pop", not the first one encountered.
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* back up `end' to just before the "pop" keyword */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  char
                    *macro;

                  /*
                    Extract macro.
                  */
                  GetNextToken(p,&p,extent,token);
                  macro=AcquireString(start);
                  macro[end-start]='\0';
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  macro=DestroyString(macro);
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  return(macros);
}

/*
  IsPoint() returns MagickFalse only when the string parses to a value of
  (near) zero with no characters consumed; otherwise the string is accepted
  as a point coordinate.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=StringToDouble(point,&p);
  return((fabs(value) < MagickEpsilon) && (p == point) ?
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char key[2*MaxTextExtent], keyword[MaxTextExtent], geometry[MaxTextExtent], name[MaxTextExtent], *next_token, pattern[MaxTextExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PixelPacket start_color; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryImageException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel); if (status == MagickFalse) return(status); } primitive=(char *) NULL; if 
(*draw_info->primitive != '@') primitive=AcquireString(draw_info->primitive); else if ((strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-')) primitive=FileToString(draw_info->primitive+1,~0UL,&image->exception); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"MVG",primitive); n=0; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.offset=0; mvg_info.exception=(&image->exception); graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MaxTextExtent; (void) QueryColorDatabase("#000000",&start_color,&image->exception); cursor=0.0; defsDepth=0; symbolDepth=0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. 
*/ GetNextToken(q,&q,MaxTextExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&graphic_context[n]->border_color, &image->exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if 
(LocaleCompare("class",keyword) == 0) { const char *mvg_class; GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. */ GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,&image->exception); if (draw_info->compliance != SVGCompliance) status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask); } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == 
ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. */ GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MaxTextExtent,"%s",token); if 
(GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern); else { status&=QueryColorDatabase(token,&graphic_context[n]->fill, &image->exception); if (graphic_context[n]->fill_opacity != OpaqueOpacity) graphic_context[n]->fill.opacity=ClampToQuantum( graphic_context[n]->fill_opacity); } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); graphic_context[n]->fill_opacity=(QuantumRange- graphic_context[n]->fill_opacity)*(1.0-opacity); if (graphic_context[n]->fill_opacity != OpaqueOpacity) graphic_context[n]->fill.opacity=ClampToQuantum( graphic_context[n]->fill_opacity); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); 
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; 
break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics); graphic_context[n]->kerning=metrics.width* StringToDouble(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,&image->exception); if (draw_info->compliance != SVGCompliance) status=SetImageMask(image,graphic_context[n]->composite_mask); } break; } if (LocaleCompare("matte",keyword) == 0) { primitive_type=MattePrimitive; break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); graphic_context[n]->fill_opacity=(QuantumRange- graphic_context[n]->fill_opacity)*(1.0-opacity); if (graphic_context[n]->fill_opacity != OpaqueOpacity) graphic_context[n]->fill.opacity=ClampToQuantum( graphic_context[n]->fill_opacity); graphic_context[n]->stroke_opacity=(QuantumRange- graphic_context[n]->stroke_opacity)*(1.0-opacity); if (graphic_context[n]->stroke_opacity != OpaqueOpacity) graphic_context[n]->stroke.opacity=ClampToQuantum( graphic_context[n]->stroke_opacity); break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),DrawError, "UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (draw_info->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageClipMask(image,(Image *) NULL); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { char name[MaxTextExtent]; const char *clip_path; GetNextToken(q,&q,extent,token); (void) FormatLocaleString(name,MaxTextExtent,"%s",token); clip_path=(const char *) GetValueFromSplayTree(macros,name); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,name,clip_path); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MaxTextExtent], name[MaxTextExtent], type[MaxTextExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MaxTextExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MaxTextExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ 
graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MaxTextExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MaxTextExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name); (void) FormatLocaleString(geometry,MaxTextExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(&image->exception, GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("mask",token) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { RectangleInfo bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MaxTextExtent); GetNextToken(q,&q,extent,token); bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') 
GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(image,token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MaxTextExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name); (void) FormatLocaleString(geometry,MaxTextExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { GradientType type; PixelPacket stop_color; GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&stop_color,&image->exception); type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) 
GradientImage(image,type,PadSpread,&start_color,&stop_color); start_color=stop_color; GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MaxTextExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern); else { status&=QueryColorDatabase(token,&graphic_context[n]->stroke, &image->exception); if (graphic_context[n]->stroke_opacity != OpaqueOpacity) graphic_context[n]->stroke.opacity=ClampToQuantum( graphic_context[n]->stroke_opacity); } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *p; p=q; GetNextToken(p,&p,extent,token); if (*token == ',') GetNextToken(p,&p,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(p,&p,extent,token); if (*token == ',') GetNextToken(p,&p,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+4), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(&image->exception, GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+4)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); 
graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); graphic_context[n]->stroke_opacity=(QuantumRange- graphic_context[n]->stroke_opacity)*(1.0-opacity); if (graphic_context[n]->stroke_opacity != OpaqueOpacity) graphic_context[n]->stroke.opacity=ClampToQuantum( graphic_context[n]->stroke_opacity); break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&graphic_context[n]->undercolor, &image->exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. */ GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1); clone_info=DestroyDrawInfo(clone_info); } break; } break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) 
floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; primitive_info[j].text=(char *) NULL; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(double) (BezierQuantum*primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), DrawError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0; if (coordinates > (MaxBezierCoordinates/4)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), DrawError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; } break; } default: break; } if (coordinates > MaxBezierCoordinates) { (void) ThrowMagickException(&image->exception,GetMagickModule(), DrawError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case 
EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(image,&mvg_info,token); if (coordinates == 0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case ColorPrimitive: case MattePrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. 
*/ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (primitive_info == (PrimitiveInfo *) NULL) break; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (draw_info->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask); status&=DrawPrimitive(image,graphic_context[n],primitive_info); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      /* Free per-primitive text payloads before the primitive vector. */
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  /* Pop any graphic contexts left by unbalanced push/pop pairs. */
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryImageException(DrawError,
      "NonconformingDrawingPrimitiveDefinition",keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  DrawImage() renders the MVG primitives in draw_info->primitive onto image.
  Thin public wrapper around RenderMVGContent() starting at recursion depth 1.
*/
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
  return(RenderMVGContent(image,draw_info,1));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: the rendered pattern image (replaced on each call).
% */
/*
  Render the MVG path registered under the artifact "name" into *pattern.
  The tile geometry comes from the "<name>-geometry" artifact; any previous
  *pattern image is destroyed and replaced by the newly rendered tile.
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern)
{
  char
    key[MaxTextExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /* The pattern's MVG content is stashed as an image artifact under "name". */
  (void) FormatLocaleString(key,MaxTextExtent,"%s",name);
  path=GetImageArtifact(image,key);
  if (path == (const char *) NULL)
    return(MagickFalse);
  /* A companion "<name>-geometry" artifact supplies the tile size. */
  (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,key);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info);
  image_info=DestroyImageInfo(image_info);
  /* Start from a fully transparent canvas. */
  (void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
    &image->exception);
  (void) SetImageBackgroundColor(*pattern);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  /* An optional "<name>-type" artifact selects the gradient type. */
  (void) FormatLocaleString(key,MaxTextExtent,"%s-type",name);
  type=GetImageArtifact(image,key);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,1);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(draw_info,primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double 
GetOpacityPixel(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_opacity)
{
  double
    alpha,
    beta,
    distance,
    subpath_opacity;

  PointInfo
    delta;

  register EdgeInfo
    *p;

  register const PointInfo
    *q;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.  Returns the fill
    opacity; the stroke opacity is written through *stroke_opacity.
  */
  *stroke_opacity=0.0;
  subpath_opacity=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* Edges are ordered by y; once one starts below this scanline, all do. */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* The scanline has passed this edge entirely; release its points. */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* Record progress so later pixels on this row skip early points. */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge (squared distance from
        (x,y) to the segment q..q+1, split into the three Voronoi regions).
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* Closest to the segment's first endpoint. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* Closest to the segment's second endpoint. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* Perpendicular squared distance to the segment interior. */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /* mid is half the stroke width; band around the edge is stroked. */
          alpha=mid+0.5;
          if ((*stroke_opacity < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_opacity=1.0;
              else
                {
                  /* Partial coverage: fall off with distance from the edge. */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_opacity=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) ||
          (subpath_opacity >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_opacity=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_opacity < (alpha*alpha))
        subpath_opacity=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_opacity >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* Edge lies wholly to the left of (x,y): it always crosses the ray. */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Cross product decides which side of the crossing segment (x,y) is on. */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* Even-odd rule: inside when the crossing count is odd. */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_opacity);
}

static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  CacheView
    *image_view;

  double
    mid;

  ExceptionInfo
    *exception;

  MagickBooleanType
    fill,
    status;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  DisableMSCWarning(4127)
  if (0)
    {
      /* Debug-only path (disabled): outline the polygon bounding boxes. */
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0]);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ?
    MagickTrue : MagickFalse;
  /* mid is half the stroke width in device space. */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /*
    Union the bounding boxes of every edge, padded by the stroke half-width.
  */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      /* Entirely outside the raster: nothing to render. */
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /*
    Clamp the render region to the image raster.
  */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for ( ; x <= stop_x; x++)
        {
          /* Only the pixel that matches the point's center gets the fill. */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            (void) GetFillColor(draw_info,x-start_x,y-start_y,q);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      fill_opacity,
      stroke_opacity;

    PixelPacket
      fill_color,
      stroke_color;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      /*
        Fill and/or stroke.  Each thread rasterizes with its own PolygonInfo.
      */
      fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill,
        draw_info->fill_rule,x,y,&stroke_opacity);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Antialiasing off: hard-threshold the coverage. */
          fill_opacity=fill_opacity > 0.25 ? 1.0 : 0.0;
          stroke_opacity=stroke_opacity > 0.25 ? 1.0 : 0.0;
        }
      (void) GetFillColor(draw_info,x-start_x,y-start_y,&fill_color);
      fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange-
        fill_color.opacity));
      MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q,
        (MagickRealType) q->opacity,q);
      (void) GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color);
      stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange-
        stroke_color.opacity));
      MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q,
        (MagickRealType) q->opacity,q);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P r i m i t i v e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% */

/* Trace a primitive's shape and paint method for "draw" debug logging. */
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
}; PointInfo p, q, point; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case MattePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "MattePrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," 
end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { CacheView *image_view; ExceptionInfo *exception; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } exception=(&image->exception); status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelGray(&draw_info->fill) == MagickFalse) || (IsPixelGray(&draw_info->stroke) == MagickFalse))) status=SetImageColorspace(image,sRGBColorspace); if (draw_info->compliance == SVGCompliance) { status&=SetImageClipMask(image,draw_info->clipping_mask); status&=SetImageMask(image,draw_info->composite_mask); } x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelPacket *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) GetFillColor(draw_info,x,y,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelPacket target; status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsColorSimilar(image,q,&target) == MagickFalse) { q++; continue; } (void) 
GetFillColor(draw_info,x,y,q); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { MagickPixelPacket target; (void) GetOneVirtualMagickPixel(image,x,y,&target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(MagickRealType) draw_info->border_color.red; target.green=(MagickRealType) draw_info->border_color.green; target.blue=(MagickRealType) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x, y,primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue); break; } case ResetMethod: { MagickBooleanType sync; for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) GetFillColor(draw_info,x,y,q); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case MattePrimitive: { if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); switch (primitive_info->method) { case PointMethod: default: { PixelPacket pixel; PixelPacket *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelPacket pixel, target; status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if 
(IsColorSimilar(image,q,&target) == MagickFalse) { q++; continue; } (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { MagickPixelPacket target; (void) GetOneVirtualMagickPixel(image,x,y,&target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(MagickRealType) draw_info->border_color.red; target.green=(MagickRealType) draw_info->border_color.green; target.blue=(MagickRealType) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x, y,primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue); break; } case ResetMethod: { MagickBooleanType sync; PixelPacket pixel; for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MaxTextExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, &image->exception); else { (void) CopyMagickString(clone_info->filename,primitive_info->text, MaxTextExtent); composite_images=ReadImage(clone_info,&image->exception); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=0; break; } 
composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { char geometry[MaxTextExtent]; /* Resize image. */ (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!", primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; (void) TransformImage(&composite_image,(char *) NULL,geometry); } if (composite_image->matte == MagickFalse) (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel); if (draw_info->opacity != OpaqueOpacity) (void) SetImageOpacity(composite_image,draw_info->opacity); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MaxTextExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry, &image->exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) (void) DrawAffineImage(image,composite_image,&affine); else (void) CompositeImage(image,draw_info->compose,composite_image, geometry.x,geometry.y); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelPacket fill_color; PixelPacket *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) 
GetFillColor(draw_info,x,y,&fill_color); MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q, (MagickRealType) q->opacity,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MaxTextExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.opacity != (Quantum) TransparentOpacity)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.opacity=(Quantum) TransparentOpacity; status&=DrawPolygonPrimitive(image,clone_info,primitive_info); clone_info=DestroyDrawInfo(clone_info); (void) DrawDashPolygon(draw_info,primitive_info,image); break; } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; if ((mid > 1.0) && ((draw_info->stroke.opacity != (Quantum) TransparentOpacity) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. 
*/ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { (void) DrawPolygonPrimitive(image,draw_info,primitive_info); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.opacity=(Quantum) TransparentOpacity; status&=DrawPolygonPrimitive(image,clone_info,primitive_info); clone_info=DestroyDrawInfo(clone_info); status&=DrawStrokePolygon(image,draw_info,primitive_info); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageClipMask(image,(Image *) NULL); status&=SetImageMask(image,(Image *) NULL); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. 
%                                                                             %
%                                                                             %
*/

/*
  DrawRoundLinecap() renders a rounded cap at a stroke endpoint: it builds a
  degenerate 4-point polygon at the endpoint (three copies nudged by
  2*MagickEpsilon) and hands it to DrawPolygonPrimitive, which rasterizes it
  as a dot.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  /*
    Replicate the endpoint, then offset three of the copies to form a tiny
    quad; linecap[4] is the sentinel terminating the primitive list.
  */
  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap));
}

static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon: each stroked subpath is converted into a filled
    polygon (via TraceStrokePolygon) painted with the stroke color/pattern.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    Clone draw info, promoting the stroke color/pattern to the fill so the
    traced stroke outline can be filled; disable stroking on the clone.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,&clone_info->stroke_pattern->exception);
  clone_info->stroke.opacity=(Quantum) TransparentOpacity;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* single point: nothing to stroke */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;  /* last point of this subpath */
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) &&
      (closed_path == MagickFalse))
      {
        /* Open subpath with round caps: cap both endpoints. */
        status&=DrawRoundLinecap(image,draw_info,p);
        status&=DrawRoundLinecap(image,draw_info,q);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* Identity: sx=sy=1, all shear/translation terms zero. */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* Defaults: opaque black fill, fully transparent white stroke. */
  (void) QueryColorDatabase("#000F",&draw_info->fill,exception);
  (void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->opacity=OpaqueOpacity;
  draw_info->fill_opacity=OpaqueOpacity;
  draw_info->stroke_opacity=OpaqueOpacity;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  /* Point size falls back to 12.0 when clone_info->pointsize is ~zero. */
  draw_info->pointsize=12.0;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
  draw_info->border_color=clone_info->border_color;
  draw_info->compose=OverCompositeOp;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /* Override defaults from image options, when present. */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *)
      NULL)
    (void) QueryColorDatabase(option,&draw_info->fill,exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->stroke,exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->undercolor,exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* Accept a named weight (e.g. "bold"); fall back to a numeric value. */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of the (n,k).
%
%  The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n:
%
%    o k:
%
%
*/
/* Computes the binomial coefficient C(n,k) = n!/(k!(n-k)!) as a double. */
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

/*
  TraceArc() traces an arc as an ellipse whose center is the midpoint of
  start/end and whose radii span half the bounding box.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

/*
  TraceArcPath() traces an SVG-style elliptical arc (radii, x-axis rotation,
  large-arc and sweep flags) from start to end, emitting Bezier segments.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickBooleanType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* Degenerate cases: coincident endpoints => point; zero radii => line. */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double)
(cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2); center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2); delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/ (radii.y*radii.y); if (delta < MagickEpsilon) return(TraceLine(primitive_info,start,end)); if (delta > 1.0) { radii.x*=sqrt((double) delta); radii.y*=sqrt((double) delta); } points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x); points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y); points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x); points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y); alpha=points[1].x-points[0].x; beta=points[1].y-points[0].y; factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25; if (factor <= 0.0) factor=0.0; else { factor=sqrt((double) factor); if (sweep == large_arc) factor=(-factor); } center.x=(double) ((points[0].x+points[1].x)/2-factor*beta); center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha); alpha=atan2(points[0].y-center.y,points[0].x-center.x); theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+ MagickEpsilon)))); p=primitive_info; status=MagickTrue; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ 
(double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; status&=TraceBezier(mvg_info,4); if (status == MagickFalse) break; p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; p+=p->coordinates; } mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(status); } static MagickBooleanType TraceBezier(MVGInfo *mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i, j; size_t control_points, quantum; /* Allocate coefficients. 
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=number_coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { for (j=i+1; j < (ssize_t) number_coordinates; j++) { alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x); if (alpha > (double) quantum) quantum=(size_t) alpha; alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y); if (alpha > (double) quantum) quantum=(size_t) alpha; } } quantum=(size_t) MagickMin((double) quantum/number_coordinates, (double) BezierQuantum); control_points=quantum*number_coordinates; if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; coefficients=(double *) AcquireQuantumMemory((size_t) number_coordinates,sizeof(*coefficients)); points=(PointInfo *) AcquireQuantumMemory((size_t) control_points, sizeof(*points)); if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); /* Compute bezier points. */ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. 
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Propagate the primitive type backward over all generated points. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}

/*
  TraceCircle() traces a circle centered at start whose radius is the
  distance from start to end, as a full 0..360 degree ellipse.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}

/*
  TraceEllipse() traces the arc of an ellipse (center, radii, start/end
  angles in degrees) as a sequence of short line segments.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);  /* degenerate radii: trace nothing */
  /* Angular step size scales with the larger radius for smoothness. */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;  /* normalize so the end angle is not before the start */
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  /* Guard the primitive buffer against absurd segment counts. */
  if ((coordinates > (double) SSIZE_MAX) ||
      (coordinates > (double) GetMaxMemoryRequest()))
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* Emit the exact endpoint so the arc terminates precisely at angle.y. */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* Mark the subpath closed when first and last points coincide. */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceLine() traces a two-point line; coincident endpoints collapse to a
  single point primitive.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo
start,const PointInfo end)
{
  /*
    Trace a line from 'start' to 'end'.  A degenerate (zero-length) line is
    demoted to a single PointPrimitive.
  */
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}

/*
  TracePath(): parse an SVG-style path string ('path') into drawing
  primitives appended through 'mvg_info'.  Uppercase commands use absolute
  coordinates, lowercase use coordinates relative to the current point.
  Returns the number of coordinates generated, or 0 on failure (in which
  case an exception has been raised via ThrowPointExpectedException or a
  callee).
*/
static size_t TracePath(Image *image,MVGInfo *mvg_info,const char *path)
{
  char
    *next_token,
    token[MaxTextExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.  Parsed fields in order: rx, ry, x-axis rotation,
          large-arc flag, sweep flag, end x, end y.
        */
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          status&=TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;  /* current point is the first control point */
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line: only the x coordinate is supplied.
        */
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to.
        */
        if (mvg_info->offset != subpath_offset)
          {
            /* finalize the previous subpath before starting a new one */
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remembered for the 'z'/'Z' close command */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              /* no preceding cubic command: degenerate to the current point */
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: the control point is the reflection
          of the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            GetNextToken(p,&p,MaxTextExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MaxTextExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(image,token);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              /* no preceding quadratic command: degenerate control points */
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line: only the y coordinate is supplied.
        */
        do
        {
          GetNextToken(p,&p,MaxTextExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MaxTextExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(image,token);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: return to the subpath's starting point and finalize
          the subpath as closed.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unrecognized path command */
        ThrowPointExpectedException(image,token);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /*
    Finalize the trailing (unclosed) subpath, then walk backwards stamping
    the primitive type on every coordinate.  With more than one closed
    subpath, fill-to-border is used so interior holes render correctly.
  */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;  /* reset; q is not read again before returning */
  return(number_coordinates);
}

/*
  TraceRectangle(): trace an axis-aligned rectangle with opposite corners
  'start' and 'end' as a closed five-point polygon.  A degenerate rectangle
  (zero width or height) produces no coordinates.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  if ((fabs(start.x-end.x) < MagickEpsilon) ||
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->coordinates=0;
      return(MagickTrue);
    }
  p=primitive_info;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=start.x;
  point.y=end.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=end.x;
  point.y=start.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceRoundRectangle(): trace a rectangle with elliptically rounded corners
  between opposite corners 'start' and 'end'; 'arc' supplies the x/y corner
  radii, clamped so the corners cannot overlap.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      /* degenerate rectangle: emit no coordinates */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /*
    Clamp corner radii to half the rectangle's extent.
  */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /*
    Trace the four quarter-ellipse corner arcs in sequence; each call
    appends coordinates at the running offset.
  */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /*
    Close back to the first point, then rewind the offset so the whole round
    rectangle reads as a single closed primitive starting at 'offset'.
  */
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceSquareLinecap(): extend both endpoints of an open stroke outward by
  'offset' along the direction of the first/last non-degenerate segment, to
  implement the square line-cap style.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /*
    Find the first vertex that is measurably distinct from vertex 0.
  */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /*
    Same for the last vertex, scanning backwards.
  */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
  return(MagickTrue);
}

/*
  TraceStrokePolygon(): convert a polygon path into a new polygon that
  outlines the stroked (widened) path, honoring the draw_info line join,
  line cap, and miter limit.  Returns a newly allocated PrimitiveInfo array,
  or NULL on allocation failure.  CheckPathExtent() grows the two working
  point buffers, releasing everything on failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
#define CheckPathExtent(pad) \
  if ((q+(pad)) >= (ssize_t) max_strokes) \
    { \
      if (~max_strokes < (pad)) \
        { \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        } \
      else \
        { \
          max_strokes+=(pad); \
          path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
            sizeof(*path_p)); \
          path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
            sizeof(*path_q)); \
        } \
      if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
        { \
          if (path_p != (PointInfo *) NULL) \
            path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          if (path_q != (PointInfo *) NULL) \
            path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
          polygon_primitive=(PrimitiveInfo *) \
            RelinquishMagickMemory(polygon_primitive); \
          return((PrimitiveInfo *) NULL); \
        } \
    }

  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;  /* holds a value for each side (p and q) of the stroke */

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* repeat the second vertex so the closing join is computed too */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p: scan for the first
    vertex measurably distinct from vertex 0.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /*
    Near-vertical and near-horizontal segments are given huge finite slopes
    (+/-1/MagickEpsilon) so the intersection math below stays well-defined.
  */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /*
    'mid' is half the stroke width in device space; the miter limit is
    compared against squared distances below, hence the squared form here.
  */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /*
    Offset the first segment perpendicular by half the stroke width to get
    the two sides (box_p / box_q) of the stroke.
  */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /* NOTE(review): the p/q index names are swapped relative to the arrays
     here; both are 0, so the effect is identical to path_q[q++]/path_p[p++]. */
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    if (dot_product < 0.25)
      continue;  /* skip segments shorter than half a pixel */
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon :
              1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon :
              -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /*
      box_*[4] is the intersection of the two offset segments (the miter
      point); parallel segments reuse the shared endpoint.
    */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    dot_product=dx.q*dy.p-dx.p*dy.q;  /* cross product: turn direction */
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              /* miter exceeds the limit: fall back to a bevel */
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /*
            Approximate the round join with an arc of short segments.
          */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* mirror image of the above for the opposite turn direction */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* advance: segment q becomes segment p for the next iteration */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: the p-side points forward, then the q-side points
    in reverse, optionally bridged when the path is closed.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
/* ===== concatenated file boundary: GB_unop__identity_fp64_fc32.c ===== */
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp64_fc32)
// op(A') function:  GB (_unop_tran__identity_fp64_fc32)

// C type:   double
// A type:   GxB_FC32_t
// cast:     double cij = (double) crealf (aij)
// unaryop:  cij = aij

// Note: the typecast from single complex to double keeps only the real part
// (crealf); the imaginary component of each entry is discarded.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = (double) crealf (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) crealf (aij) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: fc32 -> fp64 requires a real typecast, so no memcpy shortcut)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp64_fc32)
(
    double *Cx,                 // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap; NULL otherwise
    int64_t anz,                // number of entries to process
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse/hypersparse case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            // keep the real part, drop the imaginary part
            double z = (double) crealf (aij) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            double z = (double) crealf (aij) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp64_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-workspace bucket counters
    const int64_t *restrict A_slice,    // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the entire body is instantiated from the shared transpose template,
    // specialized by the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/feature.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n n y E d g e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of
%  edges in images.
%
%  The format of the CannyEdgeImage method is:
%
%      Image *CannyEdgeImage(const Image *image,const double radius,
%        const double sigma,const double lower_percent,
%        const double upper_percent,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the gaussian smoothing filter.
%
%    o sigma: the sigma of the gaussian smoothing filter.
%
%    o lower_percent: percentage of edge pixels in the lower threshold.
%
%    o upper_percent: percentage of edge pixels in the upper threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Per-pixel state for the Canny detector: gradient magnitude, the intensity
  kept after non-maxima suppression (0 when suppressed), the quantized
  gradient direction (0..3), and the pixel coordinates (used when entries
  double as a work stack in TraceEdges).
*/
typedef struct _CannyInfo
{
  double
    magnitude,
    intensity;

  int
    orientation;

  ssize_t
    x,
    y;
} CannyInfo;

/*
  Return MagickTrue if (x,y) lies inside the image bounds.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x < 0) || (x >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y < 0) || (y >= (ssize_t) image->rows))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Hysteresis edge following: starting from a strong edge at (x,y), mark it and
  iteratively walk 8-connected neighbors whose gradient intensity is at least
  lower_threshold, marking each as an edge pixel (QuantumRange).

  Row 0 of canny_cache is reused as an explicit stack of pending pixels; the
  index i is the stack pointer (push: SetMatrixElement(...,i,0,...); i++;
  pop: i--; GetMatrixElement(...,i,0,...)).  This avoids recursion on
  potentially long edge chains.

  Returns MagickFalse on any cache or matrix access failure.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  Quantum
    *q;

  ssize_t
    i;

  /*
    Mark the seed pixel as an edge.
  */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (Quantum *) NULL)
    return(MagickFalse);
  *q=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Push the seed coordinates onto the stack (slot 0 of row 0).
  */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    /*
      Pop the next pending pixel.
    */
    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (Quantum *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        /*
          Extend the edge into unmarked neighbors that survive the lower
          threshold, and push them for further exploration.
        */
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            *q=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}

MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag  "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MagickPathExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Gaussian smoothing via two directional blur convolutions (0 and 90
    degrees), then reduce to grayscale for gradient computation.
  */
  edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception);
  /*
    Find the intensity gradient of the image.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 2-row window (columns+1 wide) so the 2x2 gradient operator can
      read the east, south, and southeast neighbors of each pixel.
    */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      const Quantum
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /*
        2x2 Roberts-style difference kernels: Gx measures the horizontal
        change, Gy the vertical change, each averaged over two samples (the
        0.5 factor below).
      */
      static double
        Gx[2][2] =
        {
          { -1.0, +1.0 },
          { -1.0, +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) memset(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          /*
            NOTE(review): kernel_pixels is advanced by raw offsets (u and
            columns+1) rather than by GetPixelChannels() strides; this
            presumes one channel per pixel after the grayscale/alpha-off
            conversion above -- verify GetPixelChannels(edge_image) == 1.
          */
          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      pixel.orientation=0;
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          /*
            Quantize the gradient direction into 4 bins using the slope
            dy/dx; the cut points 0.414213562373 and 2.41421356237 are
            tan(22.5 deg) and tan(67.5 deg).  Bins: 0 = north/south,
            1 = NW/SE diagonal, 2 = east/west, 3 = NE/SW diagonal.
          */
          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      /*
        NOTE(review): on SetMatrixElement failure the continue skips the
        p advance below, desynchronizing p from x for the rest of the row --
        confirm this is intentional (a break may have been meant).
      */
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p+=GetPixelChannels(edge_image);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.
  */
  progress=0;
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      /*
        Compare each pixel's gradient magnitude against its two neighbors
        along the quantized gradient direction; keep it only if it is a
        local maximum.
      */
      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
      /*
        Track the global intensity range (used to derive the hysteresis
        thresholds); min/max are shared across threads, hence the critical
        section.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      *q=0;
      q+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold.
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      const Quantum
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      /*
        Any strong (>= upper_threshold) unmarked pixel seeds an edge trace
        that follows connected weak (>= lower_threshold) pixels.
      */
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): this loop is serial, so the omp atomic on progress++
          appears to be a leftover from a parallel variant -- harmless, but
          confirm.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e F e a t u r e s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageFeatures() returns features for each channel in the image in
%  each of four directions (horizontal, vertical, left and right diagonals)
%  for the specified distance.  The features include the angular second
%  moment, contrast, correlation, sum of squares: variance, inverse difference
%  moment, sum average, sum variance, sum entropy, entropy, difference variance,
%  difference entropy, information measures of correlation 1, information
%  measures of correlation 2, and maximum correlation coefficient.  You can
%  access the red channel contrast, for example, like this:
%
%      channel_features=GetImageFeatures(image,1,exception);
%      contrast=channel_features[RedPixelChannel].contrast[0];
%
%  Use MagickRelinquishMemory() to free the features buffer.
%
%  The format of the GetImageFeatures method is:
%
%      ChannelFeatures *GetImageFeatures(const Image *image,
%        const size_t distance,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o distance: the distance.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelFeatures *GetImageFeatures(const Image *image, const size_t distance,ExceptionInfo *exception) { typedef struct _ChannelStatistics { PixelInfo direction[4]; /* horizontal, vertical, left and right diagonals */ } ChannelStatistics; CacheView *image_view; ChannelFeatures *channel_features; ChannelStatistics **cooccurrence, correlation, *density_x, *density_xy, *density_y, entropy_x, entropy_xy, entropy_xy1, entropy_xy2, entropy_y, mean, **Q, *sum, sum_squares, variance; PixelPacket gray, *grays; MagickBooleanType status; ssize_t i, r; size_t length; unsigned int number_grays; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns < (distance+1)) || (image->rows < (distance+1))) return((ChannelFeatures *) NULL); length=MaxPixelChannels+1UL; channel_features=(ChannelFeatures *) AcquireQuantumMemory(length, sizeof(*channel_features)); if (channel_features == (ChannelFeatures *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_features,0,length* sizeof(*channel_features)); /* Form grays. 
*/ grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (PixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].alpha=(~0U); grays[i].black=(~0U); } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (r=0; r < (ssize_t) image->rows; r++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetPixelRed(image,p))].red= ScaleQuantumToMap(GetPixelRed(image,p)); grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green= ScaleQuantumToMap(GetPixelGreen(image,p)); grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue= ScaleQuantumToMap(GetPixelBlue(image,p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black= ScaleQuantumToMap(GetPixelBlack(image,p)); if (image->alpha_trait != UndefinedPixelTrait) grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha= ScaleQuantumToMap(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) memset(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) 
grays[gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[gray.blue++].blue=grays[i].blue; if (image->colorspace == CMYKColorspace) if (grays[i].black != ~0U) grays[gray.black++].black=grays[i].black; if (image->alpha_trait != UndefinedPixelTrait) if (grays[i].alpha != ~0U) grays[gray.alpha++].alpha=grays[i].alpha; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->colorspace == CMYKColorspace) if (gray.black > number_grays) number_grays=gray.black; if (image->alpha_trait != UndefinedPixelTrait) if (gray.alpha > number_grays) number_grays=gray.alpha; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1, 2*sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1, 2*sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(number_grays+1, 2*sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != (ChannelStatistics *) 
NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) memset(&correlation,0,sizeof(correlation)); (void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) memset(&mean,0,sizeof(mean)); (void) memset(sum,0,number_grays*sizeof(*sum)); (void) memset(&sum_squares,0,sizeof(sum_squares)); (void) memset(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) memset(&entropy_x,0,sizeof(entropy_x)); (void) memset(&entropy_xy,0,sizeof(entropy_xy)); (void) memset(&entropy_xy1,0,sizeof(entropy_xy1)); (void) memset(&entropy_xy2,0,sizeof(entropy_xy2)); (void) memset(&entropy_y,0,sizeof(entropy_y)); (void) memset(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) || (Q[i] == (ChannelStatistics *) NULL)) break; (void) memset(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) memset(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != (ChannelStatistics *) 
NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (r=0; r < (ssize_t) image->rows; r++) { const Quantum *magick_restrict p; ssize_t x; ssize_t offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+ 2*distance,distance+2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } p+=distance*GetPixelChannels(image);; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. */ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. 
*/ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p))) u++; while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p))) u++; while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p))) u++; while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p))) u++; while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].black++; cooccurrence[v][u].direction[i].black++; } if (image->alpha_trait != UndefinedPixelTrait) { u=0; v=0; while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p))) u++; while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].alpha++; cooccurrence[v][u].direction[i].alpha++; } } p+=GetPixelChannels(image); } } grays=(PixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), 
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. */ for (i=0; i < 4; i++) { double normalize; ssize_t y; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } normalize=PerceptibleReciprocal(normalize); for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red*=normalize; cooccurrence[x][y].direction[i].green*=normalize; cooccurrence[x][y].direction[i].blue*=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].black*=normalize; if (image->alpha_trait != UndefinedPixelTrait) cooccurrence[x][y].direction[i].alpha*=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BluePixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].black* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].alpha* cooccurrence[x][y].direction[i].alpha; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) correlation.direction[i].black+=x*y* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) correlation.direction[i].alpha+=x*y* cooccurrence[x][y].direction[i].alpha; /* Inverse Difference Moment. 
*/ channel_features[RedPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BluePixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[y+x+2].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Entropy. 
*/ channel_features[RedPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); channel_features[GreenPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); channel_features[BluePixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].alpha* MagickLog10(cooccurrence[x][y].direction[i].alpha); /* Information Measures of Correlation. */ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->alpha_trait != UndefinedPixelTrait) density_x[x].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].black+= cooccurrence[x][y].direction[i].black; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_y[y].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; 
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->colorspace == CMYKColorspace) { mean.direction[i].black+=y*sum[y].direction[i].black; sum_squares.direction[i].black+=y*y*sum[y].direction[i].black; } if (image->alpha_trait != UndefinedPixelTrait) { mean.direction[i].alpha+=y*sum[y].direction[i].alpha; sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha; } } /* Correlation: measure of linear-dependencies in the image. */ channel_features[RedPixelChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenPixelChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BluePixelChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].correlation[i]= (correlation.direction[i].black-mean.direction[i].black* mean.direction[i].black)/(sqrt(sum_squares.direction[i].black- (mean.direction[i].black*mean.direction[i].black))*sqrt( sum_squares.direction[i].black-(mean.direction[i].black* mean.direction[i].black))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].correlation[i]= (correlation.direction[i].alpha-mean.direction[i].alpha* mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha- (mean.direction[i].alpha*mean.direction[i].alpha))*sqrt( 
sum_squares.direction[i].alpha-(mean.direction[i].alpha* mean.direction[i].alpha))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. */ channel_features[RedPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].alpha; /* Sum entropy. */ channel_features[RedPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Sum variance. 
*/ channel_features[RedPixelChannel].sum_variance[i]+= (x-channel_features[RedPixelChannel].sum_entropy[i])* (x-channel_features[RedPixelChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_variance[i]+= (x-channel_features[GreenPixelChannel].sum_entropy[i])* (x-channel_features[GreenPixelChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_variance[i]+= (x-channel_features[BluePixelChannel].sum_entropy[i])* (x-channel_features[BluePixelChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_variance[i]+= (x-channel_features[BlackPixelChannel].sum_entropy[i])* (x-channel_features[BlackPixelChannel].sum_entropy[i])* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_variance[i]+= (x-channel_features[AlphaPixelChannel].sum_entropy[i])* (x-channel_features[AlphaPixelChannel].sum_entropy[i])* density_xy[x].direction[i].alpha; } } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=(y-mean.direction[i].black+1)* (y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)* (y-mean.direction[i].alpha+1)* cooccurrence[x][y].direction[i].alpha; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy.direction[i].alpha-= cooccurrence[x][y].direction[i].alpha*MagickLog10( cooccurrence[x][y].direction[i].alpha); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].black-=( cooccurrence[x][y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy1.direction[i].alpha-=( cooccurrence[x][y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red* density_y[y].direction[i].red)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* 
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue* density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].black-=(density_x[x].direction[i].black* density_y[y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha* density_y[y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); } } channel_features[RedPixelChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenPixelChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BluePixelChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].variance_sum_of_squares[i]= variance.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].variance_sum_of_squares[i]= variance.direction[i].alpha; } /* Compute more texture features. */ (void) memset(&variance,0,sizeof(variance)); (void) memset(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=density_xy[x].direction[i].alpha; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].black+=density_xy[x].direction[i].black* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha* density_xy[x].direction[i].alpha; /* Difference entropy. */ channel_features[RedPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Information Measures of Correlation. 
*/ entropy_x.direction[i].red-=(density_x[x].direction[i].red* MagickLog10(density_x[x].direction[i].red)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* MagickLog10(density_x[x].direction[i].green)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* MagickLog10(density_x[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].black-=(density_x[x].direction[i].black* MagickLog10(density_x[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha* MagickLog10(density_x[x].direction[i].alpha)); entropy_y.direction[i].red-=(density_y[x].direction[i].red* MagickLog10(density_y[x].direction[i].red)); entropy_y.direction[i].green-=(density_y[x].direction[i].green* MagickLog10(density_y[x].direction[i].green)); entropy_y.direction[i].blue-=(density_y[x].direction[i].blue* MagickLog10(density_y[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].black-=(density_y[x].direction[i].black* MagickLog10(density_y[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha* MagickLog10(density_y[x].direction[i].alpha)); } /* Difference variance. 
*/ channel_features[RedPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BluePixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].black)- (variance.direction[i].black*variance.direction[i].black))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].alpha)- (variance.direction[i].alpha*variance.direction[i].alpha))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? 
entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BluePixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/ (entropy_x.direction[i].black > entropy_y.direction[i].black ? entropy_x.direction[i].black : entropy_y.direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/ (entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ? entropy_x.direction[i].alpha : entropy_y.direction[i].alpha); channel_features[RedPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BluePixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black- entropy_xy.direction[i].black))))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha- entropy_xy.direction[i].alpha))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t z; for (z=0; z < (ssize_t) number_grays; z++) { ssize_t y; ChannelStatistics pixel; (void) memset(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) pixel.direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) pixel.direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } /* Maximum Correlation Coefficient. */ if ((fabs(density_x[z].direction[i].red) > MagickEpsilon) && (fabs(density_y[x].direction[i].red) > MagickEpsilon)) Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; if ((fabs(density_x[z].direction[i].green) > MagickEpsilon) && (fabs(density_y[x].direction[i].red) > MagickEpsilon)) Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; if ((fabs(density_x[z].direction[i].blue) > MagickEpsilon) && (fabs(density_y[x].direction[i].blue) > MagickEpsilon)) Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/ density_x[z].direction[i].blue/density_y[x].direction[i].blue; if (image->colorspace == CMYKColorspace) if ((fabs(density_x[z].direction[i].black) > MagickEpsilon) && (fabs(density_y[x].direction[i].black) > MagickEpsilon)) 
Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black* cooccurrence[y][x].direction[i].black/ density_x[z].direction[i].black/density_y[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) if ((fabs(density_x[z].direction[i].alpha) > MagickEpsilon) && (fabs(density_y[x].direction[i].alpha) > MagickEpsilon)) Q[z][y].direction[i].alpha+= cooccurrence[z][x].direction[i].alpha* cooccurrence[y][x].direction[i].alpha/ density_x[z].direction[i].alpha/ density_y[x].direction[i].alpha; } } channel_features[RedPixelChannel].contrast[i]+=z*z* pixel.direction[i].red; channel_features[GreenPixelChannel].contrast[i]+=z*z* pixel.direction[i].green; channel_features[BluePixelChannel].contrast[i]+=z*z* pixel.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].contrast[i]+=z*z* pixel.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].contrast[i]+=z*z* pixel.direction[i].alpha; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BluePixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. 
*/ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H o u g h L i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HoughLineImage() can be used in conjunction with any binary edge extracted % image (we recommend Canny) to identify lines in the image. The algorithm % accumulates counts for every white pixel for every possible orientation (for % angles from 0 to 179 in 1 degree increments) and distance from the center of % the image to the corner (in 1 px increments) and stores the counts in an % accumulator matrix of angle vs distance. The size of the accumulator is % 180x(diagonal/2). Next it searches this space for peaks in counts and % converts the locations of the peaks to slope and intercept in the normal % x,y input image space. Use the slope/intercepts to find the endpoints % clipped to the bounds of the image. The lines are then drawn. The counts % are a measure of the length of the lines. % % The format of the HoughLineImage method is: % % Image *HoughLineImage(const Image *image,const size_t width, % const size_t height,const size_t threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find line pairs as local maxima in this neighborhood. 
% % o threshold: the line count threshold. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define BoundingBox "viewbox" DrawInfo *draw_info; Image *image; MagickBooleanType status; /* Open image. */ image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } image->columns=columns; image->rows=rows; draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/ DefaultResolution; draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/ DefaultResolution; image->columns=(size_t) (draw_info->affine.sx*image->columns); image->rows=(size_t) (draw_info->affine.sy*image->rows); status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); if (SetImageBackgroundColor(image,exception) == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Render drawing. 
*/ if (GetBlobStreamData(image) == (unsigned char *) NULL) draw_info->primitive=FileToString(image->filename,~0UL,exception); else { draw_info->primitive=(char *) AcquireQuantumMemory(1,(size_t) GetBlobSize(image)+1); if (draw_info->primitive != (char *) NULL) { (void) memcpy(draw_info->primitive,GetBlobStreamData(image), (size_t) GetBlobSize(image)); draw_info->primitive[GetBlobSize(image)]='\0'; } } (void) DrawImage(image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } MagickExport Image *HoughLineImage(const Image *image,const size_t width, const size_t height,const size_t threshold,ExceptionInfo *exception) { #define HoughLineImageTag "HoughLine/Image" CacheView *image_view; char message[MagickPathExtent], path[MagickPathExtent]; const char *artifact; double hough_height; Image *lines_image = NULL; ImageInfo *image_info; int file; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *accumulator; PointInfo center; ssize_t y; size_t accumulator_height, accumulator_width, line_count; /* Create the accumulator. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); accumulator_width=180; hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ? image->rows : image->columns))/2.0); accumulator_height=(size_t) (2.0*hough_height); accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height, sizeof(double),exception); if (accumulator == (MatrixInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (NullMatrix(accumulator) == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Populate the accumulator. 
*/ status=MagickTrue; progress=0; center.x=(double) image->columns/2.0; center.y=(double) image->rows/2.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelIntensity(image,p) > (QuantumRange/2.0)) { ssize_t i; for (i=0; i < 180; i++) { double count, radius; radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+ (((double) y-center.y)*sin(DegreesToRadians((double) i))); (void) GetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); count++; (void) SetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); } } p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } /* Generate line segments from accumulator. 
*/ file=AcquireUniqueFileResource(path); if (file == -1) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } (void) FormatLocaleString(message,MagickPathExtent, "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width, (double) height,(double) threshold); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "# x1,y1 x2,y2 # count angle distance\n"); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; line_count=image->columns > image->rows ? image->columns/4 : image->rows/4; if (threshold != 0) line_count=threshold; for (y=0; y < (ssize_t) accumulator_height; y++) { ssize_t x; for (x=0; x < (ssize_t) accumulator_width; x++) { double count; (void) GetMatrixElement(accumulator,x,y,&count); if (count >= (double) line_count) { double maxima; SegmentInfo line; ssize_t v; /* Is point a local maxima? 
*/ maxima=count; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((u != 0) || (v !=0)) { (void) GetMatrixElement(accumulator,x+u,y+v,&count); if (count > maxima) { maxima=count; break; } } } if (u < (ssize_t) (width/2)) break; } (void) GetMatrixElement(accumulator,x,y,&count); if (maxima > count) continue; if ((x >= 45) && (x <= 135)) { /* y = (r-x cos(t))/sin(t) */ line.x1=0.0; line.y1=((double) (y-(accumulator_height/2.0))-((line.x1- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); line.x2=(double) image->columns; line.y2=((double) (y-(accumulator_height/2.0))-((line.x2- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); } else { /* x = (r-y cos(t))/sin(t) */ line.y1=0.0; line.x1=((double) (y-(accumulator_height/2.0))-((line.y1- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); line.y2=(double) image->rows; line.x2=((double) (y-(accumulator_height/2.0))-((line.y2- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); } (void) FormatLocaleString(message,MagickPathExtent, "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2, maxima,(double) x,(double) y); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; } } } (void) close(file); /* Render lines to image canvas. 
*/ image_info=AcquireImageInfo(); image_info->background_color=image->background_color; (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"background",artifact); artifact=GetImageArtifact(image,"fill"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"fill",artifact); artifact=GetImageArtifact(image,"stroke"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"stroke",artifact); artifact=GetImageArtifact(image,"strokewidth"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"strokewidth",artifact); lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception); artifact=GetImageArtifact(image,"hough-lines:accumulator"); if ((lines_image != (Image *) NULL) && (IsStringTrue(artifact) != MagickFalse)) { Image *accumulator_image; accumulator_image=MatrixToImage(accumulator,exception); if (accumulator_image != (Image *) NULL) AppendImageToList(&lines_image,accumulator_image); } /* Free resources. */ accumulator=DestroyMatrixInfo(accumulator); image_info=DestroyImageInfo(image_info); (void) RelinquishUniqueFileResource(path); return(GetFirstImageInList(lines_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M e a n S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MeanShiftImage() delineate arbitrarily shaped clusters in the image. For % each pixel, it visits all the pixels in the neighborhood specified by % the window centered at the pixel and excludes those that are outside the % radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those % that are within the specified color distance from the current mean, and % computes a new x,y centroid from those coordinates and a new mean. 
This new
%  x,y centroid is used as the center for a new window.  This process iterates
%  until it converges and the final mean replaces the (original window
%  center) pixel value.  It repeats this process for the next pixel, etc.,
%  until it processes all pixels in the image.  Results are typically better
%  with colorspaces other than sRGB.  We recommend YIQ, YUV or YCbCr.
%
%  The format of the MeanShiftImage method is:
%
%      Image *MeanShiftImage(const Image *image,const size_t width,
%        const size_t height,const double color_distance,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width, height: find pixels in this neighborhood.
%
%    o color_distance: the color distance.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations  100
#define MeanShiftImageTag  "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result image starts as a clone; each pixel is overwritten with its
    converged mean-shift value below.
  */
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
    {
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  /*
    Two virtual views of the source: one for row reads, one for the random
    single-pixel probes made while the window wanders off the current row.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      PixelInfo
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      ssize_t
        i;

      /*
        Seed the iteration with the current pixel's color and position.
      */
      GetPixelInfo(image,&mean_pixel);
      GetPixelInfoPixel(image,p,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        PixelInfo
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetPixelInfo(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /*
          Scan the window centered on the current mean location; the
          (v*v+u*u) test restricts the scan to an elliptical neighborhood
          inside the width x height window.
        */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelInfo
                  pixel;

                /*
                  NOTE(review): status is overwritten by every probe here,
                  inside the parallel region; a failed earlier row's status
                  may be masked -- confirm this is intended.
                */
                status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                /*
                  Squared RGB distance from the current mean color.
                */
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    /*
                      Pixel is within the color window: fold it into the new
                      centroid and mean color.
                    */
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.alpha+=pixel.alpha;
                    count++;
                  }
              }
          }
        }
        /*
          PerceptibleReciprocal() guards against count == 0.
        */
        gamma=PerceptibleReciprocal(count);
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.alpha=gamma*sum_pixel.alpha;
        /*
          Convergence metric: squared spatial shift plus squared color shift
          scaled to a 0-255 range; stop once the window has settled.
        */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      /*
        The converged mean color replaces the original pixel.
      */
      SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
      SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
      SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
      SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mean_image);
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MeanShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
mkl_util.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #ifdef INTEL_MKL #include <list> #include <memory> #include <string> #include <unordered_map> #include <utility> #include <vector> #if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY) #ifndef INTEL_MKL #error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL" #endif #endif #if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY) #error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined" #endif #ifdef INTEL_MKL_ML_ONLY #error "Please use INTEL MKL DNN (the default option for --config=mkl)." 
#endif

// Legacy MKL-ML headers; only pulled in for the deprecated MKL-ML build.
// NOTE(review): the #error above makes this branch unreachable -- confirm
// whether the MKL-ML path is intentionally dead.
#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#endif

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"

#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"

using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif

// Provide `uint` on Windows builds, where it is not defined by default.
#ifdef _WIN32
typedef unsigned int uint;
#endif

namespace tensorflow {

// The file contains a number of utility classes and functions used by MKL
// enabled kernels

// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.

// For use with MKL ML, has been deprecated
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;

// The dimensions order that MKL-DNN internally uses for 2D activations
// [Batch, Channel, Height, Width] and
// for 2D filters [Out_Channel, In_Channel, Height, Width].
// NOTE: Dim_O/Dim_I deliberately alias the same indices as Dim_N/Dim_C so
// activations and filters can share this enum.
typedef enum {
  Dim_N = 0,
  Dim_C = 1,
  Dim_H = 2,
  Dim_W = 3,
  Dim_O = 0,
  Dim_I = 1
} MklDnnDims;

// The dimensions order that MKL-DNN internally uses for 3D activations
// [Batch, Channel, Depth, Height, Width] and
// for 3D filters [Out_Channel, In_Channel, Depth, Height, Width].
// As above, Dim3d_O/Dim3d_I alias Dim3d_N/Dim3d_C.
typedef enum {
  Dim3d_N = 0,
  Dim3d_C = 1,
  Dim3d_D = 2,
  Dim3d_H = 3,
  Dim3d_W = 4,
  Dim3d_O = 0,
  Dim3d_I = 1
} MklDnnDims3D;

// Enum for the order of dimensions of a TF 2D filter with shape
// [filter_height, filter_width, in_channels, out_channels]
typedef enum {
  TF_2DFILTER_DIM_H = 0,
  TF_2DFILTER_DIM_W = 1,
  TF_2DFILTER_DIM_I = 2,
  TF_2DFILTER_DIM_O = 3
} TFFilterDims2d;

// Enum for the order of dimensions of a TF 3D filter with shape
// [filter_depth, filter_height, filter_width, in_channels, out_channels]
typedef enum {
  TF_3DFILTER_DIM_P = 0,
  TF_3DFILTER_DIM_H = 1,
  TF_3DFILTER_DIM_W = 2,
  TF_3DFILTER_DIM_I = 3,
  TF_3DFILTER_DIM_O = 4
} TFFilterDims3d;

// The dimensions order that MKL-DNN requires for the filter in a grouped
// convolution (2D only)
typedef enum {
  MKL_GROUP_FILTER_DIM_G = 0,
  MKL_GROUP_FILTER_DIM_O = 1,
  MKL_GROUP_FILTER_DIM_I = 2,
  MKL_GROUP_FILTER_DIM_H = 3,
  MKL_GROUP_FILTER_DIM_W = 4
} MklDnnFilterGroupDims;

// Enum used to templatize MklOp kernel implementations
// that support both fp32 and int8 versions.
enum class MklQuantization {
  QUANTIZED_VERSION,
  FP_VERSION,
};

static const int kSmallBatchSize = 32;

#ifdef INTEL_MKL_ML_ONLY
// Deprecated MKL-ML shape metadata. Owns raw arrays and dnnLayout_t handles;
// released in the destructor (not copyable).
class MklShape {
 public:
  MklShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklShape);  // Cannot copy

  // Frees owned size/stride/dim-map arrays and deletes both MKL layouts.
  ~MklShape() {
    if (sizes_) delete[] sizes_;

    if (strides_) delete[] strides_;

    if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);

    if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);

    if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
  }

  const bool IsMklTensor() const { return isMklTensor_; }

  void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }

  void SetDimensions(const size_t dimension) { dimension_ = dimension; }

  // Takes ownership of an already-created layout handle.
  void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }

  // Derives the MKL layout from an MKL primitive + resource type.
  void SetMklLayout(const void* primitive, size_t resourceType) {
    CHECK_EQ(
        dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
                                         (dnnResourceType_t)resourceType),
        E_SUCCESS);
  }

  // Records the TF-side sizes/strides and builds the matching dnn layout.
  void SetTfLayout(const size_t dimension, const size_t* sizes,
                   const size_t* strides) {
    dimension_ = dimension;
    if (dimension > 0) {  // MKL doesn't support zero dimension tensors
      sizes_ = new size_t[dimension];
      strides_ = new size_t[dimension];
      for (int ii = 0; ii < dimension; ii++) {
        sizes_[ii] = sizes[ii];
        strides_[ii] = strides[ii];
      }
      CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
               E_SUCCESS);
    }
  }

  // Default case - MKL dim ordering is opposite of TF dim ordering
  // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
  // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
  // For layers that rely on data_format semantics (conv, pooling etc.)
  // or operate only on certain dimensions (relu, concat, split etc.),
  // Mkl APIs might require us to reorder these dimensions. In such cases,
  // kernels should explicitly set this map
  void SetTfDimOrder(const size_t dimension) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
    }
  }

  // Copies a caller-supplied TF->MKL dimension map.
  void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
    }
  }

  // Builds the map from a TF data_format; 4-D (2D spatial) only.
  void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    CHECK_EQ(dimension, 4);
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
  }

  const dnnLayout_t GetMklLayout() const { return mklLayout_; }
  const dnnLayout_t GetTfLayout() const { return tfLayout_; }
  // Layout the data is currently in: MKL layout for MKL tensors, else TF.
  const dnnLayout_t GetCurLayout() const {
    return isMklTensor_ ? mklLayout_ : tfLayout_;
  }
  size_t GetDimension() const { return dimension_; }
  const size_t* GetSizes() const { return sizes_; }
  int64 dim_size(int index) const { return sizes_[index]; }
  int64 tf_dim_size(int index) const {
    return sizes_[tf_to_mkl_dim_map_[index]];
  }
  const size_t* GetStrides() const { return strides_; }
  const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
  size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Channel dimension.
  bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Batch dimension.
  bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Width dimension.
  bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Height dimension.
  bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NCHW format.
  bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NHWC format.
  bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Converts flat data from the current layout into targetLayout via a
  // one-shot MKL conversion primitive (created and destroyed per call).
  void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
                            void* output) const {
    dnnLayout_t curLayout;
    if (isMklTensor_)
      curLayout = mklLayout_;
    else
      curLayout = tfLayout_;
    dnnPrimitive_t convert;
    CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
             E_SUCCESS);
    CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
    CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
  }

  // The following methods are used for serializing and de-serializing the
  // contents of the mklshape object.
  // The data is serialized in this order
  // isMklTensor_
  // dimension_
  // sizes_
  // strides_
  // mklLayout_
  // tfLayout_
  // tf_to_mkl_dim_map_

#define SIZE_OF_MKL_DNN_BUF \
  (dnnLayoutSerializationBufferSize_F32())  // Size of buffer needed to
                                            // serialize dnn_layout pointer

// Size of buffer to hold the serialized object, the size is computed as
// follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
// sizeof(strides_)
// + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
// + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
  (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)

// First we need to define some macro for offsets into the serial buffer where
// different elements of Mklshape is written/read from
#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
  (IS_MKL_TENSOR_OFFSET + sizeof(size_t))  // Location of dimension_
// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
  (SIZES_OFFSET(dims) + dims * sizeof(size_t))  // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
  (STRIDES_OFFSET(dims) + dims * sizeof(size_t))  // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
  (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)  // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
  (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

  // TODO(agramesh1) make sure to create a const to share with rewrite pass
  // for min size of MKL metadata tensor.

  // Rebuilds this object from a buffer written by SerializeMklShape.
  // Allocates fresh arrays only when the buffer marks an MKL tensor.
  void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
    CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
    // Make sure buffer holds at least isMklTensor_
    isMklTensor_ =
        *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;

    if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
      dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
      CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
          << "Bufsize too small in DeSerialize";
      sizes_ = new size_t[dimension_];
      strides_ = new size_t[dimension_];
      tf_to_mkl_dim_map_ = new size_t[dimension_];
      for (int i = 0; i < dimension_; i++) {
        sizes_[i] =
            reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
        strides_[i] = reinterpret_cast<const size_t*>(
            buf + STRIDES_OFFSET(dimension_))[i];
        tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
            buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
      }
      CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                        buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                        buf + TF_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
    }
  }

  // Writes this object into buf using the offset macros above; the layouts
  // are serialized via the MKL layout-serialization API.
  void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small to Serialize";
    *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) = isMklTensor_ ?
    1 : 0;
    if (isMklTensor_) {
      *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
      for (int i = 0; i < dimension_; i++) {
        reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
            sizes_[i];
        reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
            strides_[i];
        reinterpret_cast<size_t*>(buf +
                                  TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
            tf_to_mkl_dim_map_[i];
      }
      CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(
          dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
          E_SUCCESS);
    }
  }

 private:
  // Flag to indicate if the tensor is an MKL tensor or not
  bool isMklTensor_ = false;
  // Pointer to the MKL layout
  dnnLayout_t mklLayout_ = nullptr;
  // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  dnnLayout_t tfLayout_ = nullptr;
  size_t dimension_ = 0;
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  // TF dimension corresponding to this MKL dimension
  size_t* tf_to_mkl_dim_map_ = nullptr;
};

#else

// Forward decl
TensorFormat MklDnn3DDataFormatToTFDataFormat(memory::format format);
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                        const memory::dims& strides,
                                        memory::data_type dtype);

// MKL-DNN replacement for MklShape. All metadata lives in one POD struct so
// the whole object can be (de)serialized with a single memcpy-style copy.
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  }
MklShapeData; MklShapeData data_; typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t; #define INVALID_DIM_SIZE -1 public: MklDnnShape() { for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]); ++i) { data_.sizes_[i] = -1; } for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) { data_.map_[i] = -1; } } ~MklDnnShape() {} TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape); // Cannot copy /// Helper function to compare memory::desc objects for MklDnn. /// May be this should go into MklDnn directly. inline bool CompareMklDnnLayouts(const memory::desc& md1, const memory::desc& md2) const { mkldnn_memory_desc_t mdd1 = md1.data; mkldnn_memory_desc_t mdd2 = md2.data; const char* d1 = reinterpret_cast<const char*>(&mdd1); const char* d2 = reinterpret_cast<const char*>(&mdd2); size_t md_size = sizeof(mdd1); for (size_t i = 0; i < md_size; i++) { if (*d1++ != *d2++) { return false; } } return true; } /// Equality function for MklDnnShape objects /// @return true if both are equal; false otherwise. inline bool operator==(const MklDnnShape& input_shape) const { if (this->IsMklTensor() != input_shape.IsMklTensor()) { return false; } // If input tensors are in Mkl layout, then we check for dimensions and // sizes. if (this->IsMklTensor()) { return this->GetTfShape() == input_shape.GetTfShape() && CompareMklDnnLayouts(this->GetMklLayout(), input_shape.GetMklLayout()); } return true; } /// Equality operator for MklDnnShape and TFShape. 
/// Returns: true if TF shapes for both are the same, false otherwise inline bool operator==(const TensorShape& input_shape) const { if (!this->IsMklTensor()) { return false; } return this->GetTfShape() == input_shape; } inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; } inline void SetMklTensor(bool is_mkl_tensor) { data_.is_mkl_tensor_ = is_mkl_tensor; } inline void SetDimensions(const size_t dimension) { data_.dimension_ = dimension; } inline size_t GetDimension(char dimension) const { int index = GetMklDnnTensorDimIndex(dimension); CHECK(index >= 0 && index < this->GetDimension()) << "Invalid index from the dimension: " << index << ", " << dimension; return this->DimSize(index); } inline size_t GetDimension3D(char dimension) const { int index = GetMklDnnTensor3DDimIndex(dimension); CHECK(index >= 0 && index < this->GetDimension()) << "Invalid index from the dimension: " << index << ", " << dimension; return this->DimSize(index); } inline int32 GetMklDnnTensorDimIndex(char dimension) const { switch (dimension) { case 'N': return MklDnnDims::Dim_N; case 'C': return MklDnnDims::Dim_C; case 'H': return MklDnnDims::Dim_H; case 'W': return MklDnnDims::Dim_W; default: LOG(FATAL) << "Invalid dimension: " << dimension; return -1; // Avoid compiler warning about missing return value } } inline int32 GetMklDnnTensor3DDimIndex(char dimension) const { switch (dimension) { case 'N': return MklDnnDims3D::Dim3d_N; case 'C': return MklDnnDims3D::Dim3d_C; case 'D': return MklDnnDims3D::Dim3d_D; case 'H': return MklDnnDims3D::Dim3d_H; case 'W': return MklDnnDims3D::Dim3d_W; default: LOG(FATAL) << "Invalid dimension: " << dimension; return -1; // Avoid compiler warning about missing return value } } inline size_t GetDimension() const { return data_.dimension_; } inline const int* GetSizes() const { return reinterpret_cast<const int*>(&data_.sizes_[0]); } // Returns an mkldnn::memory::dims object that contains the sizes of this // MklDnnShape object. 
inline memory::dims GetSizesAsMklDnnDims() const { memory::dims retVal; if (data_.is_mkl_tensor_) { size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]); for (size_t i = 0; i < dimensions; i++) { if (data_.sizes_[i] != INVALID_DIM_SIZE) retVal.push_back(data_.sizes_[i]); } } else { CHECK_EQ(data_.is_mkl_tensor_, true); } return retVal; } inline int64 DimSize(int index) const { CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0])); return data_.sizes_[index]; } /// Return TensorShape that describes the Tensorflow shape of the tensor /// represented by this MklShape. inline TensorShape GetTfShape() const { CHECK_EQ(data_.is_mkl_tensor_, true); std::vector<int32> shape(data_.dimension_, -1); if (data_.tf_data_format_ != memory::format::blocked) { for (size_t idx = 0; idx < data_.dimension_; ++idx) { shape[idx] = data_.sizes_[TfDimIdx(idx)]; } } else { // If Tensorflow shape is in Blocked format, then we don't have dimension // map for it. So we just create Tensorflow shape from sizes in the // specified order. for (size_t idx = 0; idx < data_.dimension_; ++idx) { shape[idx] = data_.sizes_[idx]; } } TensorShape ts; bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok(); CHECK_EQ(ret, true); return ts; } inline void SetElemType(memory::data_type dt) { data_.T_ = dt; } inline const memory::data_type GetElemType() { return data_.T_; } inline void SetMklLayout(memory::primitive_desc* pd) { CHECK_NOTNULL(pd); data_.mkl_md_ = pd->desc().data; } inline void SetMklLayout(memory::desc* md) { CHECK_NOTNULL(md); data_.mkl_md_ = md->data; } inline const memory::desc GetMklLayout() const { return memory::desc(data_.mkl_md_); } inline memory::format GetTfDataFormat() const { return data_.tf_data_format_; } /// We don't create primitive_descriptor for TensorFlow layout now. /// We use lazy evaluation and create it only when needed. Input format can /// also be Blocked format. 
inline void SetTfLayout(size_t dims, const memory::dims& sizes, memory::format format) { DCHECK_EQ(dims, sizes.size()) << "SetTfLayout: Number of dimensions does not" "match with dimension array"; data_.dimension_ = dims; for (size_t ii = 0; ii < dims; ii++) { data_.sizes_[ii] = sizes[ii]; } data_.tf_data_format_ = format; if (format != memory::format::blocked) { SetTfDimOrder(dims, format); } } inline void SetTfLayout2D(size_t dims, const memory::dims& sizes, memory::format format) { DCHECK_EQ(dims, sizes.size()) << "SetTfLayout2D: Number of dimensions does not" "match with dimension array"; data_.dimension_ = dims; for (size_t ii = 0; ii < dims; ++ii) { data_.sizes_[ii] = sizes[ii]; } data_.tf_data_format_ = format; if (format != memory::format::blocked) { data_.map_[0] = MklDnnDims::Dim_N; data_.map_[1] = MklDnnDims::Dim_C; } } inline const memory::desc GetTfLayout() const { memory::dims dims; for (size_t ii = 0; ii < data_.dimension_; ii++) { dims.push_back(data_.sizes_[ii]); } // Create Blocked memory desc if input TF format was set like that. if (data_.tf_data_format_ == memory::format::blocked) { auto strides = CalculateTFStrides(dims); return CreateBlockedMemDescHelper(dims, strides, data_.T_); } else { return memory::desc(dims, data_.T_, data_.tf_data_format_); } } inline const memory::desc GetCurLayout() const { return IsMklTensor() ? GetMklLayout() : GetTfLayout(); } // nhasabni - I've removed SetTfDimOrder that was setting default order in // case of MKL-ML. We don't need a case of default dimension order because // when an operator that does not get data_format attribute gets all inputs // in Tensorflow format, it will produce output in Tensorflow format. 
inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) { CHECK(dimension == data_.dimension_); for (size_t ii = 0; ii < dimension; ii++) { data_.map_[ii] = map[ii]; } } inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) { if (dimension == 5) { CHECK(dimension == data_.dimension_); data_.map_[GetTensorDimIndex<3>(data_format, '0')] = MklDnnDims3D::Dim3d_D; data_.map_[GetTensorDimIndex<3>(data_format, '1')] = MklDnnDims3D::Dim3d_H; data_.map_[GetTensorDimIndex<3>(data_format, '2')] = MklDnnDims3D::Dim3d_W; data_.map_[GetTensorDimIndex<3>(data_format, 'C')] = MklDnnDims3D::Dim3d_C; data_.map_[GetTensorDimIndex<3>(data_format, 'N')] = MklDnnDims3D::Dim3d_N; } else { CHECK_EQ(dimension, 4); CHECK(dimension == data_.dimension_); data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W; data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H; data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C; data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N; } } inline void SetTfDimOrder(const size_t dimension, memory::format format) { TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format); SetTfDimOrder(dimension, data_format); } inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; } inline size_t TfDimIdx(int index) const { return data_.map_[index]; } inline int64 TfDimSize(int index) const { return data_.sizes_[TfDimIdx(index)]; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Channel dimension. inline bool IsMklChannelDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_C; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Batch dimension. 
inline bool IsMklBatchDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_N; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Width dimension. inline bool IsMklWidthDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_W; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Height dimension. inline bool IsMklHeightDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_H; } /// Check if the TF-Mkl dimension ordering map specifies if the input /// tensor is in NCHW format. inline bool IsTensorInNCHWFormat() const { TensorFormat data_format = FORMAT_NCHW; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } /// Check if the TF-Mkl dimension ordering map specifies if the input /// tensor is in NHWC format. inline bool IsTensorInNHWCFormat() const { TensorFormat data_format = FORMAT_NHWC; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } /// The following methods are used for serializing and de-serializing the /// contents of the mklshape object. 
  /// The data is serialized in this order
  /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

  /// Size of buffer to hold the serialized object, the size is computed by
  /// following above mentioned order
  inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

  /// Serialize this shape by copying the raw MklShapeData struct into `buf`.
  /// CHECK-fails if `buf_size` is smaller than the serialized size.
  /// NOTE(review): the reinterpret_cast assumes `buf` is suitably aligned for
  /// MklShapeData — presumably true because buffers come from Tensor storage;
  /// confirm against callers.
  void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small to SerializeMklDnnShape";
    *reinterpret_cast<MklShapeData*>(buf) = data_;
  }

  /// De-serialize from `buf`: reads the leading is_mkl_tensor_ flag first and
  /// copies the full struct only when the flag says this is an MKL tensor.
  void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
    // Make sure buffer holds at least is_mkl_tensor_.
    CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
        << "Buffer size is too small in DeSerializeMklDnnShape";
    const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
    if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
      CHECK(buf_size >= GetSerializeBufferSize())
          << "Buffer size is too small in DeSerializeMklDnnShape";
      data_ = *reinterpret_cast<const MklShapeData*>(buf);
    }
  }
};
#endif

// List of MklShape objects. Used in Concat/Split layers.
#ifndef INTEL_MKL_ML_ONLY
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif

#ifdef INTEL_MKL_ML_ONLY
// Check if all tensors specified by MklShapes are MKL tensors.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  for (auto& s : shapes) {
    if (!s.IsMklTensor()) {
      return false;
    }
  }
  return true;
}

// Convert an MKL-layout tensor back to plain TF layout (MKL-ML path).
// Builds the TF shape from the MKL shape's dimension map, allocates a temp
// tensor, and converts the flat data through the TF layout.
// NOTE(review): the Status returned by allocate_temp is ignored here; on
// allocation failure output_tensor stays empty and the conversion below
// operates on it — consider wrapping in TF_CHECK_OK.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;
  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }
  // Allocate output tensor.
  context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor);
  dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());
  if (mkl_tensor.NumElements() != 0) {
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }
  return output_tensor;
}
#else
using mkldnn::stream;
template <typename T>
class MklDnnData;

// Convert an MKL-layout tensor back to plain TF layout (MKL-DNN path).
// If the input is already a TF tensor it is returned unchanged; otherwise a
// reorder primitive is run (or the data is forwarded when no reorder is
// needed). mkldnn exceptions are fatal.
// NOTE(review): as in the MKL-ML variant above, the allocate_temp Status is
// ignored — a failed allocation would surface later as a reorder/CopyFrom
// failure rather than a clean error.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  try {
    if (!mkl_shape.IsMklTensor())
      return mkl_tensor;  // return input since it is already TF tensor

    TensorShape output_shape = mkl_shape.GetTfShape();

    // Allocate output tensor.
    context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                           &output_tensor);

    auto cpu_engine = engine(engine::cpu, 0);
    MklDnnData<T> input(&cpu_engine);

    // Get Mkl layout of input tensor.
    auto input_mkl_md = mkl_shape.GetMklLayout();
    auto output_tf_md = mkl_shape.GetTfLayout();
    auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine);
    input.SetUsrMem(input_mkl_md, &mkl_tensor);

    // reorder
    if (input.IsReorderNeeded(output_tf_pd)) {
      std::vector<primitive> net;
      CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net),
               true);
      stream(stream::kind::eager).submit(net).wait();
    } else {
      // If not, just forward input tensor to output tensor.
      CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape));
    }
  } catch (mkldnn::error& e) {
    string error_msg = "Status: " + std::to_string(e.status) +
                       ", message: " + string(e.message) + ", in file " +
                       string(__FILE__) + ":" + std::to_string(__LINE__);
    LOG(FATAL) << "Operation received an exception: " << error_msg;
  }
  return output_tensor;
}
#endif

// Get the MKL shape from the second string tensor
#ifdef INTEL_MKL_ML_ONLY
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  // De-serialize the shape from the metadata input that is interleaved with
  // the data input at index GetTensorMetaDataIndex(n, ...).
  mklshape->DeSerializeMklShape(
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .data(),
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .size() *
          sizeof(uint8));
}
#else
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  // Same as the MKL-ML variant but de-serializing an MklDnnShape.
  mklshape->DeSerializeMklDnnShape(
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .data(),
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .size() *
          sizeof(uint8));
}
#endif

// Gets the actual input
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}

// Fetch the named input list into *input_tensors (must be non-null).
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}

#ifdef INTEL_MKL_ML_ONLY
// De-serialize every "mkl_<name>" metadata tensor into the corresponding
// slot of *mkl_shapes. NOTE(review): assumes mkl_shapes is already sized to
// at least the number of metadata inputs — verify against callers.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#else
// MKL-DNN twin of the function above.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklDnnShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#endif

#ifndef INTEL_MKL_ML_ONLY
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape input_mkl_shape;
  GetMklShape(context, input_idx, &input_mkl_shape);
  if (input_mkl_shape.IsMklTensor()) {
    return input_mkl_shape.GetTfShape();
  } else {
    const Tensor& t = MklGetInput(context, input_idx);
    return t.shape();
  }
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif

// Allocates a temp tensor and returns the data buffer for temporary storage.
// Currently
#ifndef INTEL_MKL_ML_ONLY
// Allocate a temp tensor big enough to back the given MKL-DNN primitive
// descriptor and hand back its raw buffer in *buf_out.
// The "+ 1" element over-allocates to cover a pd.get_size() that is not a
// multiple of sizeof(T).
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  TensorShape tf_shape;

  tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
// MKL-ML variant: size the buffer from the dnnLayout_t; always float-typed.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  TensorShape tf_shape;

  tf_shape.AddDim(
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
          sizeof(float) +
      1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif

// Allocate a temp tensor of the given TF shape; no buffer pointer returned.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
}

// Fill `strides` (4 entries) for the given sizes so that MKL sees the data
// in NCHW stride order regardless of the TF data format.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  if (data_format == FORMAT_NHWC) {
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  } else {
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = sizes[0] * sizes[1];
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Convert a 4-D MKL shape (innermost-to-outermost sizes) into a TF
// TensorShape laid out per `data_format_`. Rejects non-4-D inputs.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(context, tf_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));
  std::vector<int32> sizes;

  sizes.push_back(tf_sizes[3]);

  if (data_format_ == FORMAT_NHWC) {
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
    sizes.push_back(tf_sizes[2]);
  } else {
    sizes.push_back(tf_sizes[2]);
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif

// Map a dimension character ('N','C','H','W') to the MklDims enum index.
// Any other character is fatal.
inline int32 GetMklTensorDimIndex(char dimension) {
  switch (dimension) {
    case 'N':
      return MklDims::N;
    case 'C':
      return MklDims::C;
    case 'H':
      return MklDims::H;
    case 'W':
      return MklDims::W;
    default:
      LOG(FATAL) << "Invalid dimension: " << dimension;
      return -1;  // Avoid compiler warning about missing return value
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Size of the named dimension ('N','C','H','W') in the given MKL shape.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
#endif

// Copy both the data tensor and its MKL metadata tensor from input slot
// idx_in to output slot idx_out (shallow CopyFrom, shared buffers).
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  const Tensor& meta = context->input(idx_meta_in);
  Tensor output(data.dtype());
  Tensor meta_output(meta.dtype());

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, data.shape()));
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(idx_data_out, output);
  context->set_output(idx_meta_out, meta_output);
}

#ifdef INTEL_MKL_ML_ONLY
// Copy a plain TF input tensor to an output slot, reshaped to `shape`, and
// emit a dummy (non-MKL) metadata shape for that slot.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#else
// MKL-DNN twin of the function above.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#endif

#ifdef INTEL_MKL_ML_ONLY
// Forward a plain TF input to an output slot (ref-aware) and emit a dummy
// (non-MKL) metadata shape for it.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#else
// MKL-DNN twin of the function above.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forward both the data tensor and its MKL metadata tensor from input slot
// idx_in to output slot idx_out (ref-aware).
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

#ifndef INTEL_MKL_ML_ONLY
// Set a dummy MKLDNN shape (called when the output is in TF format)
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
                                      uint32 idx_data_out) {
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}

// Forward the data tensor to the output slot while emitting the caller's
// MKL shape as that slot's metadata.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32_t idx_data_out) {
  uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

#ifdef INTEL_MKL_ML_ONLY
// Set a dummy MKL shape (called when the output is in TF format)
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}

// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.

// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Delegate to the (MklShape, TensorShape) overload with swapped arguments.
  return MklCompareShapes(input_shape_1, input_shape_0);
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->dims() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->dims();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.

// Transpose a float NHWC tensor into NCHW using MKL's somatcopy, one batch
// entry per loop iteration.
// NOTE(review): the OpenMP thread count 16 is hard-coded rather than derived
// from the session's thread pool.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);

  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}

// Inverse of MklNHWCToNCHW: NCHW -> NHWC; dimensions are read from *output.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);

  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}

#endif

// -------------------------------------------------------------------

#ifndef INTEL_MKL_ML_ONLY

/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();

/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <> memory::data_type MklDnnType<float>() { return memory::data_type::f32; } template <> memory::data_type MklDnnType<quint8>() { return memory::data_type::u8; } template <> memory::data_type MklDnnType<qint8>() { return memory::data_type::s8; } template <> memory::data_type MklDnnType<qint32>() { return memory::data_type::s32; } template <> memory::data_type MklDnnType<bfloat16>() { // TODO(nhasabni): Enable MKL-DNN bfloat16 type later. // Currently, falling back to f32 to get compilation working. return memory::data_type::f32; } /// Map TensorFlow's data format into MKL-DNN 3D data format /// @input: TensorFlow data format /// @return: memory::format corresponding to TensorFlow data format; /// Fails with an error if invalid data format. inline memory::format TFDataFormatToMklDnn3DDataFormat(TensorFormat format) { if (format == FORMAT_NHWC) return memory::format::ndhwc; else if (format == FORMAT_NCHW) return memory::format::ncdhw; TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format")); return memory::format::format_undef; } /// Map TensorFlow's data format into MKL-DNN data format /// /// @input: TensorFlow data format /// @return: memory::format corresponding to TensorFlow data format; /// Fails with an error if invalid data format. inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) { if (format == FORMAT_NHWC) return memory::format::nhwc; else if (format == FORMAT_NCHW) return memory::format::nchw; TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format")); return memory::format::format_undef; } /// Map MKL-DNN data format to TensorFlow's data format /// /// @input: memory::format /// @return: Tensorflow data format corresponding to memory::format /// Fails with an error if invalid data format. 
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  // 4-D and 5-D channels-last formats both map to NHWC; channels-first to
  // NCHW.
  if (format == memory::format::nhwc || format == memory::format::ndhwc)
    return FORMAT_NHWC;
  else if (format == memory::format::nchw || format == memory::format::ncdhw)
    return FORMAT_NCHW;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));

  // Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
  // that we don't come here.
  return FORMAT_NHWC;
}

/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  memory::dims dims(shape.dims());
  for (int d = 0; d < shape.dims(); ++d) {
    dims[d] = shape.dim_size(d);
  }
  return dims;
}

/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a specific one than above function. It will map input
/// TensorShape into MKL-DNN dims in NCHW format. So it may not preserve the
/// order of dimensions. E.g., if input tensor is in NHWC format, then dims
/// will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
  int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
  int w = shape.dim_size(GetTensorDimIndex(format, 'W'));

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// 5-D variant: map a TensorShape (with spatial dims '0','1','2') into
/// MKL-DNN dims in NCDHW order.
inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
                                               TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
  int d = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
  int h = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
  int w = shape.dim_size(GetTensorDimIndex<3>(format, '2'));

  // MKL-DNN requires dimensions in NCDHW format.
  return memory::dims({n, c, d, h, w});
}

/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = in_dims[GetTensorDimIndex(format, 'N')];
  int c = in_dims[GetTensorDimIndex(format, 'C')];
  int h = in_dims[GetTensorDimIndex(format, 'H')];
  int w = in_dims[GetTensorDimIndex(format, 'W')];

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by preserving dimension order.
/// /// @input MKL-DNN memory::dims object /// @output TensorShape corresponding to memory::dims inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) { std::vector<int32> shape(dims.size(), -1); for (int d = 0; d < dims.size(); d++) { shape[d] = dims[d]; } TensorShape ret; CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true); return ret; } /// Function to calculate strides given tensor shape in Tensorflow order /// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention, /// dimension with size 1 is outermost dimension; while dimension with size 4 is /// innermost dimension. So strides for this tensor would be {4 * 3 * 2, /// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}. /// /// @input Tensorflow shape in memory::dims type /// @return memory::dims containing strides for the tensor. inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) { CHECK_GT(dims_tf_order.size(), 0); memory::dims strides(dims_tf_order.size()); int last_dim_idx = dims_tf_order.size() - 1; strides[last_dim_idx] = 1; for (int d = last_dim_idx - 1; d >= 0; d--) { strides[d] = strides[d + 1] * dims_tf_order[d + 1]; } return strides; } inline padding_kind TFPaddingToMklDnnPadding(Padding pad) { // MKL-DNN only supports zero padding. return padding_kind::zero; } /// Helper function to create memory descriptor in Blocked format /// /// @input: Tensor dimensions /// @input: strides corresponding to dimensions. One can use utility /// function such as CalculateTFStrides to compute strides /// for given dimensions. /// @return: memory::desc object corresponding to blocked memory format /// for given dimensions and strides. inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim, const memory::dims& strides, memory::data_type dtype) { CHECK_EQ(dim.size(), strides.size()); // We have to construct memory descriptor in a C style. 
This is not at all // ideal but MKLDNN does not offer any API to construct descriptor in // blocked format except a copy constructor that accepts // mkldnn_memory_desc_t. mkldnn_memory_desc_t md; md.primitive_kind = mkldnn_memory; md.ndims = dim.size(); md.format = mkldnn_blocked; md.data_type = memory::convert_to_c(dtype); for (size_t i = 0; i < dim.size(); i++) { md.layout_desc.blocking.block_dims[i] = 1; md.layout_desc.blocking.strides[1][i] = 1; md.layout_desc.blocking.strides[0][i] = strides[i]; md.layout_desc.blocking.padding_dims[i] = dim[i]; md.layout_desc.blocking.offset_padding_to_data[i] = 0; md.dims[i] = dim[i]; } md.layout_desc.blocking.offset_padding = 0; return memory::desc(md); } template <typename T> inline primitive FindOrCreateReorder(const memory* from, const memory* to); /* * Class to represent all the resources corresponding to a tensor in TensorFlow * that are required to execute an operation (such as Convolution). */ template <typename T> class MklDnnData { private: /// MKL-DNN memory primitive for input user memory memory* user_memory_; /// MKL-DNN memory primitive in case input or output reorder is needed. memory* reorder_memory_; /// Operations memory descriptor memory::desc* op_md_; // flat to indicate if data is 3D or not. bool bIs3D; /// Operations temp buffer void* allocated_buffer_; /// CPU engine on which operation will be executed const engine* cpu_engine_; public: explicit MklDnnData(const engine* e) : user_memory_(nullptr), reorder_memory_(nullptr), op_md_(nullptr), allocated_buffer_(nullptr), cpu_engine_(e) {} ~MklDnnData() { if (allocated_buffer_ != nullptr) { cpu_allocator()->DeallocateRaw(allocated_buffer_); } cpu_engine_ = nullptr; // We don't own this. 
delete (user_memory_); delete (reorder_memory_); delete (op_md_); } inline void* GetTensorBuffer(const Tensor* tensor) const { CHECK_NOTNULL(tensor); return const_cast<void*>( static_cast<const void*>(tensor->flat<T>().data())); } void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; } bool GetIs3D() { return bIs3D; } /// Set user memory primitive using specified dimensions, memory format and /// data_buffer. Function automatically uses element data type by using /// input type T used for creating call object. /// /// In a nutshell, function allows user to describe the input tensor to /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and /// memory format HWIO, and the buffer that contains actual values is /// pointed by data_buffer. inline void SetUsrMem(const memory::dims& dim, memory::format fm, void* data_buffer = nullptr) { auto md = memory::desc(dim, MklDnnType<T>(), fm); SetUsrMem(md, data_buffer); } inline void SetUsrMem(const memory::dims& dim, memory::format fm, const Tensor* tensor) { CHECK_NOTNULL(tensor); SetUsrMem(dim, fm, GetTensorBuffer(tensor)); } /// Helper function to create memory descriptor in Blocked format /// /// @input: Tensor dimensions /// @input: strides corresponding to dimensions. One can use utility /// function such as CalculateTFStrides to compute strides /// for given dimensions. /// @return: memory::desc object corresponding to blocked memory format /// for given dimensions and strides. static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim, const memory::dims& strides) { return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>()); } /// A version of SetUsrMem call that allows user to create memory in blocked /// format. So in addition to accepting dimensions, it also accepts strides. /// This allows user to create memory for tensor in a format that is not /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6 /// dimensional tensor as a native format. 
But by using blocked format, a user /// can create memory for 6D tensor. inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides, void* data_buffer = nullptr) { CHECK_EQ(dim.size(), strides.size()); auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides); SetUsrMem(blocked_md, data_buffer); } inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides, const Tensor* tensor) { CHECK_NOTNULL(tensor); SetUsrMem(dim, strides, GetTensorBuffer(tensor)); } /// A version of function to set user memory primitive that accepts memory /// descriptor directly, instead of accepting dimensions and format. This /// function is more generic that the one above, but the function above is /// sufficient in most cases. inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) { auto pd = memory::primitive_desc(md, *cpu_engine_); SetUsrMem(pd, data_buffer); } /// A version of SetUsrMem with memory descriptor and tensor inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) { CHECK_NOTNULL(tensor); SetUsrMem(md, GetTensorBuffer(tensor)); } /// A version of function to set user memory primitive that accepts primitive /// descriptor directly, instead of accepting dimensions and format. This /// function is more generic that the one above, but the function above is /// sufficient in most cases. inline void SetUsrMem(const memory::primitive_desc& pd, void* data_buffer = nullptr) { CHECK_NOTNULL(cpu_engine_); if (user_memory_) delete user_memory_; // TODO(nhasabni): can we remove dynamic memory allocation? if (data_buffer) { user_memory_ = new memory(pd, data_buffer); } else { user_memory_ = new memory(pd); } } /// A version of SetUsrMem with primitive descriptor and tensor inline void SetUsrMem(const memory::primitive_desc& pd, const Tensor* tensor) { CHECK_NOTNULL(tensor); SetUsrMem(pd, GetTensorBuffer(tensor)); } /// Get function for user memory primitive. 
inline const memory* GetUsrMem() const { return user_memory_; } /// Get function for primitive descriptor of user memory primitive. inline const memory::primitive_desc GetUsrMemPrimDesc() const { CHECK_NOTNULL(user_memory_); return user_memory_->get_primitive_desc(); } /// Get function for descriptor of user memory. inline memory::desc GetUsrMemDesc() { // This is ugly. Why MKL-DNN does not provide desc() method of const type?? const memory::primitive_desc pd = GetUsrMemPrimDesc(); return const_cast<memory::primitive_desc*>(&pd)->desc(); } /// Get function for data buffer of user memory primitive. inline void* GetUsrMemDataHandle() const { CHECK_NOTNULL(user_memory_); return user_memory_->get_data_handle(); } /// Set function for data buffer of user memory primitive. inline void SetUsrMemDataHandle(void* data_buffer) { CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(data_buffer); user_memory_->set_data_handle(data_buffer); } /// Set function for data buffer of user memory primitive. inline void SetUsrMemDataHandle(const Tensor* tensor) { CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(tensor); user_memory_->set_data_handle(GetTensorBuffer(tensor)); } /// allocate function for data buffer inline void AllocateBuffer(size_t size) { const int64 kMemoryAlginment = 64; // For AVX512 memory alignment. allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlginment, size); } inline void* GetAllocatedBuffer() { return allocated_buffer_; } /// Get the memory primitive for input and output of an op. If inputs /// to an op require reorders, then this function returns memory primitive /// for reorder. Otherwise, it will return memory primitive for user memory. /// /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to /// execute Conv2D, we need memory primitive for I and F. Buf if reorder is /// required for I and F (say I_r is reorder primitive for I; F_r is reorder /// primitive for F), then we need I_r and F_r to perform Conv2D. 
inline const memory& GetOpMem() const { return reorder_memory_ ? *reorder_memory_ : *user_memory_; } /// Set memory descriptor of an operation in terms of dimensions and memory /// format. E.g., For Conv2D, the dimensions would be same as user dimensions /// but memory::format would be mkldnn::any because we want MKL-DNN to choose /// best layout/format for given input dimensions. inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) { // TODO(nhasabni): can we remove dynamic memory allocation? op_md_ = new memory::desc(dim, MklDnnType<T>(), fm); } /// Get function for memory descriptor for an operation inline const memory::desc& GetOpMemDesc() const { return *op_md_; } /// Predicate that checks if we need to reorder user's memory into memory /// pointed by op_pd. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @return: true in case reorder of input is needed; false, otherwise. inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const { CHECK_NOTNULL(user_memory_); return op_pd != user_memory_->get_primitive_desc(); } /// Predicate that checks if we need to reorder user's memory into memory /// based on the provided format. /// /// @input: target_format - memory format of the given input of an /// operation /// @return: true in case reorder of input is needed; false, otherwise. inline bool IsReorderNeeded(const memory::format& target_format) const { CHECK_NOTNULL(user_memory_); return target_format != user_memory_->get_primitive_desc().desc().data.format; } /// Function to create a reorder from memory pointed by from to memory pointed /// by to. Returns created primitive. inline primitive CreateReorder(const memory* from, const memory* to) const { CHECK_NOTNULL(from); CHECK_NOTNULL(to); return reorder(*from, *to); } /// Function to handle input reordering /// /// Check if we need to reorder this input of an operation. 
/// Return true and allocate reorder memory primitive if reorder is needed. /// Otherwise, return false and do not allocate reorder memory primitive. /// /// To check if reorder is needed, this function compares memory primitive /// descriptor of an operation (op_pd) for the given input with the /// user-specified memory primitive descriptor. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @input: net - net to which to add reorder primitive in case it is needed. /// @return: true in case reorder of input is needed; false, otherwise. inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? reorder_memory_ = new memory(op_pd); net->push_back(CreateReorder(user_memory_, reorder_memory_)); return true; } return false; } /// TODO: this is a faster path with reorder primitive cache compared with /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove /// slow path in the future inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) { CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? // primitive reuse don't allow two same reorder prim in // one stream, so submit it immediately reorder_memory_ = new memory(op_pd); std::vector<primitive> net; net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_)); stream(stream::kind::eager).submit(net).wait(); return true; } return false; } /// Overloaded version of above function that accepts memory buffer /// where output of reorder needs to be stored. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @reorder_data_handle - memory buffer where output of reorder needs to be /// stored. Primitive does not check if buffer is /// enough size to write. 
/// @input: net - net to which to add reorder primitive in case it is needed. /// @return: true in case reorder of input is needed; false, otherwise. inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, void* reorder_data_handle, std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(reorder_data_handle); CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? reorder_memory_ = new memory(op_pd, reorder_data_handle); net->push_back(CreateReorder(user_memory_, reorder_memory_)); return true; } return false; } /// TODO: this is a faster path with reorder primitive cache compared with /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove /// slow path in the future inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, void* reorder_data_handle) { CHECK_NOTNULL(reorder_data_handle); CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? // primitive reuse don't allow two same reorder prim in // one stream, so submit it immediately std::vector<primitive> net; reorder_memory_ = new memory(op_pd, reorder_data_handle); net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_)); stream(stream::kind::eager).submit(net).wait(); return true; } return false; } /// Another overloaded version of CheckReorderToOpMem that accepts Tensor /// where output of reorder needs to be stored. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @reorder_tensor - Tensor whose buffer is to be used to store output of /// reorder. Primitive does not check if buffer is /// enough size to write. /// @input: net - net to which to add reorder primitive in case it is needed. /// @return: true in case reorder of input is needed; false, otherwise. 
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, Tensor* reorder_tensor, std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(reorder_tensor); return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net); } /// TODO: this is a faster path with reorder primitive cache compared with /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove /// slow path in the future inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, Tensor* reorder_tensor) { CHECK_NOTNULL(reorder_tensor); return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor)); } /// Function to handle output reorder /// /// This function performs very similar functionality as input reordering /// function above. The only difference is that this function does not add /// reorder primitive to the net. The reason for this is: the reorder /// primitive for output needs to be added to the list only after operation /// has executed. But we need to prepare a temporary buffer in case output /// reorder is needed. And this temporary buffer will hold the output of /// an operation before it is fed to reorder primitive. /// /// @input memory primitive descriptor for the given output of an operation /// @return: true in case reorder of output is needed; false, otherwise. inline bool PrepareReorderToUserMemIfReq( const memory::primitive_desc& op_pd) { CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO(nhasabni): can we remove dynamic memory allocation? reorder_memory_ = new memory(op_pd); return true; } return false; } /// Function to actually insert reorder primitive in the net /// /// This function completes remaining part of output reordering. It inserts /// a reordering primitive from the temporary buffer that holds the output /// to the user-specified output buffer. 
/// /// @input: net - net to which to add reorder primitive inline void InsertReorderToUserMem(std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(reorder_memory_); net->push_back(CreateReorder(reorder_memory_, user_memory_)); } /// TODO: this is a faster path with reorder primitive cache compared with /// InsertReorderToUserMem(std::vector<primitive>* net), will remove /// slow path in the future inline void InsertReorderToUserMem() { CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(reorder_memory_); // primitive reuse don't allow two same reorder prim in // one stream, so submit it immediately std::vector<primitive> net; net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_)); stream(stream::kind::eager).submit(net).wait(); } }; /// Base class for operations with reuse of primitives /// class MklPrimitive { public: virtual ~MklPrimitive() {} // Dummy data which MKL DNN never operates on unsigned char* DummyData = nullptr; }; const mkldnn::memory::dims NONE_DIMS = {}; // // LRUCache is a class which implements LRU (Least Recently Used) cache. // The implementation is similar to that of // tensorflow/core/platform/cloud/expiring_lru_cache.h // without its thread-safe part because the cache is supposed to be // used as thread local (for instance, MklPrimitive caching). // // The LRU list maintains objects in chronological order based on // creation time, with the least recently accessed object at the // tail of LRU list, while the most recently accessed object // at the head of LRU list. // // This class is used to maintain an upper bound on the total number of // cached items. When the cache reaches its capacity, the LRU item will // be removed and replaced by a new one from SetOp call. 
// template <typename T> class LRUCache { public: explicit LRUCache(size_t capacity) { capacity_ = capacity; Clear(); } T* GetOp(const string& key) { auto it = cache_.find(key); if (it == cache_.end()) { return nullptr; } // Move to the front of LRU list as the most recently accessed. lru_list_.erase(it->second.lru_iterator); lru_list_.push_front(it->first); it->second.lru_iterator = lru_list_.begin(); return it->second.op; } void SetOp(const string& key, T* op) { if (lru_list_.size() >= capacity_) { Delete(); } // Insert an entry to the front of the LRU list lru_list_.push_front(key); Entry entry(op, lru_list_.begin()); cache_.emplace(std::make_pair(key, std::move(entry))); } void Clear() { if (lru_list_.empty()) return; // Clean up the cache cache_.clear(); lru_list_.clear(); } private: struct Entry { // The entry's value. T* op; // A list iterator pointing to the entry's position in the LRU list. std::list<string>::iterator lru_iterator; // Constructor Entry(T* op, std::list<string>::iterator it) { this->op = op; this->lru_iterator = it; } // Move construcctor Entry(Entry&& source) noexcept : lru_iterator(std::move(source.lru_iterator)) { op = std::move(source.op); source.op = std::forward<T*>(nullptr); } // Destructor ~Entry() { if (op != nullptr) delete op; } }; // Remove the least recently accessed entry from LRU list, which // is the tail of lru_list_. Update cache_ correspondingly. bool Delete() { if (lru_list_.empty()) return false; string key = lru_list_.back(); lru_list_.pop_back(); cache_.erase(key); return true; } // Cache capacity size_t capacity_; // The cache, a map from string key to a LRU entry. std::unordered_map<string, Entry> cache_; // The LRU list of entries. // The front of the list contains the key of the most recently accessed // entry, while the back of the list is the least recently accessed entry. 
std::list<string> lru_list_; }; template <typename T> class MklPrimitiveFactory { public: MklPrimitiveFactory() {} ~MklPrimitiveFactory() {} MklPrimitive* GetOp(const string& key) { auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache(); return lru_cache.GetOp(key); } void SetOp(const string& key, MklPrimitive* op) { auto& lru_cache = MklPrimitiveFactory<T>::GetLRUCache(); lru_cache.SetOp(key, op); } /// Function to decide whether HW has AVX512 or AVX2 /// For those legacy device(w/o AVX512 and AVX2), /// MKL-DNN GEMM will be used. static inline bool IsLegacyPlatform() { return (!port::TestCPUFeature(port::CPUFeature::AVX512F) && !port::TestCPUFeature(port::CPUFeature::AVX2)); } /// Fuction to check whether primitive memory optimization is enabled static inline bool IsPrimitiveMemOptEnabled() { bool is_primitive_mem_opt_enabled = true; TF_CHECK_OK(ReadBoolFromEnvVar("TF_MKL_OPTIMIZE_PRIMITIVE_MEMUSE", true, &is_primitive_mem_opt_enabled)); return is_primitive_mem_opt_enabled; } private: static inline LRUCache<MklPrimitive>& GetLRUCache() { static const int kCapacity = 1024; // cache capacity static thread_local LRUCache<MklPrimitive> lru_cache_(kCapacity); return lru_cache_; } }; // utility class for creating keys of MKL primitive pool. 
// Builds a delimiter-separated byte string from heterogeneous fields; used
// as the lookup key for the MKL primitive LRU cache.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }

  ~FactoryKeyCreator() {}

  void AddAsKey(const string& str) { Append(str); }

  void AddAsKey(const mkldnn::memory::dims& dims) {
    for (unsigned int i = 0; i < dims.size(); i++) {
      AddAsKey<int>(dims[i]);
    }
  }

  // Appends the raw bytes of `data` to the key; the representation is only
  // used for equality lookups, so endianness/padding does not matter here.
  template <typename T>
  void AddAsKey(const T data) {
    auto buffer = reinterpret_cast<const char*>(&data);
    Append(StringPiece(buffer, sizeof(T)));
  }

  string GetKey() { return key_; }

 private:
  string key_;
  const char delimiter = 'x';
  const int kMaxKeyLength = 256;
  void Append(StringPiece s) {
    key_.append(string(s));
    key_.append(1, delimiter);
  }
};

// Picks the preferred MKL-DNN blocked layout for the detected CPU features:
// 16-channel blocking on AVX512, 8-channel blocking on AVX2 (2D only, and
// only when the channel count is divisible by 8), plain nchw/ncdhw otherwise.
static inline memory::format get_desired_format(int channel,
                                                bool is_2d = true) {
  memory::format fmt_desired = memory::format::any;

  if (port::TestCPUFeature(port::CPUFeature::AVX512F)) {
    fmt_desired = is_2d ? memory::format::nChw16c : memory::format::nCdhw16c;
  } else if (port::TestCPUFeature(port::CPUFeature::AVX2) &&
             (channel % 8) == 0) {
    fmt_desired = is_2d ? memory::format::nChw8c
                        : memory::format::ncdhw;  // no avx2 support for 3d yet.
  } else {
    fmt_desired = is_2d ? memory::format::nchw : memory::format::ncdhw;
  }
  return fmt_desired;
}

// Cacheable reorder primitive. The underlying src/dst memories are created
// with DummyData placeholders; SetMemory() rebinds them to real buffers so
// one primitive can be reused for many tensors with the same layouts.
class MklReorderPrimitive : public MklPrimitive {
 public:
  explicit MklReorderPrimitive(const memory* from, const memory* to) {
    Setup(from, to);
  }
  ~MklReorderPrimitive() {}

  std::shared_ptr<primitive> GetPrimitive() { return context_.reorder_prim; }

  // Points the cached src/dst memories at the caller's current buffers.
  void SetMemory(const memory* from, const memory* to) {
    context_.src_mem->set_data_handle(from->get_data_handle());
    context_.dst_mem->set_data_handle(to->get_data_handle());
  }

 private:
  struct ReorderContext {
    std::shared_ptr<mkldnn::memory> src_mem;
    std::shared_ptr<mkldnn::memory> dst_mem;
    std::shared_ptr<primitive> reorder_prim;
    ReorderContext()
        : src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {}
  } context_;

  engine cpu_engine_ = engine(engine::cpu, 0);

  void Setup(const memory* from, const memory* to) {
    context_.src_mem.reset(new memory(
        {from->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.dst_mem.reset(
        new memory({to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
    context_.reorder_prim = std::make_shared<mkldnn::reorder>(
        reorder(*context_.src_mem, *context_.dst_mem));
  }
};

// Singleton factory that caches MklReorderPrimitive objects keyed on the
// source and destination memory descriptors (format, dtype, dims, strides).
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
 public:
  // Returns a cached reorder for (from, to), creating and caching one on a
  // miss, then rebinds it to the callers' data handles.
  static MklReorderPrimitive* Get(const memory* from, const memory* to) {
    auto reorderPrim = static_cast<MklReorderPrimitive*>(
        MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
    if (reorderPrim == nullptr) {
      reorderPrim = new MklReorderPrimitive(from, to);
      MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to,
                                                              reorderPrim);
    }
    reorderPrim->SetMemory(from, to);
    return reorderPrim;
  }

  static MklReorderPrimitiveFactory& GetInstance() {
    static MklReorderPrimitiveFactory instance_;
    return instance_;
  }

 private:
  MklReorderPrimitiveFactory() {}
  ~MklReorderPrimitiveFactory() {}

  // Serializes both descriptors (format, dtype, dims, first-level blocking
  // strides) into a cache key.
  static string CreateKey(const memory* from, const memory* to) {
    string prefix = "reorder";
    FactoryKeyCreator key_creator;
    auto const& from_desc = from->get_primitive_desc().desc().data;
    auto const& to_desc = to->get_primitive_desc().desc().data;
    const int KIdxFirstStride = 0;
    memory::dims from_dims(from_desc.dims, &from_desc.dims[from_desc.ndims]);
    memory::dims to_dims(to_desc.dims, &to_desc.dims[to_desc.ndims]);
    memory::dims from_strides(
        from_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &from_desc.layout_desc.blocking
             .strides[KIdxFirstStride][from_desc.ndims]);
    memory::dims to_strides(
        to_desc.layout_desc.blocking.strides[KIdxFirstStride],
        &to_desc.layout_desc.blocking.strides[KIdxFirstStride][to_desc.ndims]);
    key_creator.AddAsKey(prefix);
    key_creator.AddAsKey(static_cast<int>(from_desc.format));
    key_creator.AddAsKey(static_cast<int>(from_desc.data_type));
    key_creator.AddAsKey(from_dims);
    key_creator.AddAsKey(from_strides);
    key_creator.AddAsKey(static_cast<int>(to_desc.format));
    key_creator.AddAsKey(static_cast<int>(to_desc.data_type));
    key_creator.AddAsKey(to_dims);
    key_creator.AddAsKey(to_strides);
    return key_creator.GetKey();
  }

  MklPrimitive* GetReorder(const memory* from, const memory* to) {
    string key = CreateKey(from, to);
    return this->GetOp(key);
  }

  void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
    string key = CreateKey(from, to);
    this->SetOp(key, op);
  }
};

/// Function to find (or create) a reorder from memory pointed by
/// from to memory pointed by to; it creates the primitive or
/// fetches it from the pool if it is cached.
/// Returns the primitive.
template <typename T> inline primitive FindOrCreateReorder(const memory* from, const memory* to) { CHECK_NOTNULL(from); CHECK_NOTNULL(to); MklReorderPrimitive* reorder_prim = MklReorderPrimitiveFactory<T>::Get(from, to); return *reorder_prim->GetPrimitive(); } // utility function to determine if it is conv 1x1 and stride != 1 // for purpose of temporarily disabling primitive reuse inline bool IsConv1x1StrideNot1(memory::dims filter_dims, memory::dims strides) { if (filter_dims.size() != 4 || strides.size() != 2) return false; return ((filter_dims[2] == 1) && (filter_dims[3] == 1) && ((strides[0] != 1) || (strides[1] != 1))); } #endif // INTEL_MKL_DNN } // namespace tensorflow #endif // INTEL_MKL #endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
parallel.h
// @file parallel.h This file contains the functionality for parallel operation // @author TPOC: contact@palisade-crypto.org // // @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT) // All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. THIS SOFTWARE IS // PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef SRC_CORE_LIB_UTILS_PARALLEL_H_ #define SRC_CORE_LIB_UTILS_PARALLEL_H_ #ifdef PARALLEL #include "omp.h" #endif // #include <iostream> namespace lbcrypto { class ParallelControls { int machineThreads; public: // @Brief CTOR, enables parallel operations as default // Cache the number of machine threads the system reports (can be // overridden by environment variables) // enable on startup by default ParallelControls() { #ifdef PARALLEL machineThreads = omp_get_max_threads(); Enable(); #else machineThreads = 1; #endif } // @Brief Enable() enables parallel operation void Enable() { #ifdef PARALLEL omp_set_num_threads(machineThreads); #endif } // @Brief Disable() disables parallel operation void Disable() { #ifdef PARALLEL omp_set_num_threads(0); #endif } int GetMachineThreads() const { return machineThreads; } static int GetNumProcs() { #ifdef PARALLEL return omp_get_num_procs(); #else return 1; #endif } // @Brief returns current number of threads that are usable // @return int # threads int GetNumThreads() { #ifdef PARALLEL int nthreads = 1; int tid = 1; // Fork a team of threads giving them their own copies of variables // so we can see how many threads we have to work with #pragma omp parallel private(tid) { /* Obtain thread number */ tid = omp_get_thread_num(); /* Only master thread does this */ if (tid == 0) { nthreads = omp_get_num_threads(); } } // std::cout << "\nNumber of threads = " << nthreads << std::endl; return nthreads; #else return machineThreads; #endif } // @Brief sets number of threads to use (limited by system value) void SetNumThreads(int nthreads) { #ifdef PARALLEL // set number of thread, but limit it to the system set // number of machine threads... if (nthreads > machineThreads) { nthreads = machineThreads; } omp_set_num_threads(nthreads); #endif } }; extern ParallelControls PalisadeParallelControls; } // namespace lbcrypto #endif /* SRC_CORE_LIB_UTILS_PARALLEL_H_ */
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(8*t1+Ny+13,32)),floord(16*t2+Ny+12,32)),floord(16*t1-16*t2+Nz+Ny+11,32));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1020,1024)),ceild(32*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(8*t1+Nx+13,1024)),floord(16*t2+Nx+12,1024)),floord(32*t3+Nx+28,1024)),floord(16*t1-16*t2+Nz+Nx+11,1024));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),32*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),32*t3+30),1024*t4+1022),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(1024*t4,t5+1); ubv=min(1024*t4+1023,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ 
(-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_unaryop__ainv_int8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int8_uint32
// op(A') function:  GB_tran__ainv_int8_uint32

// C type:   int8_t
// A type:   uint32_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = -aij    (additive inverse)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse (negation)
#define GB_OP(z, x) \
    z = -x ;

// casting: narrow the uint32_t input to the int8_t output type
// NOTE(review): conversion and negation rely on C's modular/implementation-
// defined narrowing rules; this matches the generator's convention for
// integer AINV -- confirm against the GraphBLAS spec if values look odd.
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij)): fetch, cast, then apply the operator
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = -(int8_t) aij elementwise over the anz entries of Ax,
// in parallel with nthreads OpenMP threads.  Returns GrB_NO_VALUE when
// this specialized kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop__ainv_int8_uint32
(
    int8_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transposes A while applying the same cast + negate, using the shared
// template in GB_unaryop_transpose.c (phase 2 of 2 of the transpose).
GrB_Info GB_tran__ainv_int8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__abs_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__abs_uint64_uint64)
// op(A') function:  GB (_unop_tran__abs_uint64_uint64)

// C type:   uint64_t
// A type:   uint64_t
// cast:     uint64_t cij = aij
// unaryop:  cij = aij    (ABS of an unsigned value is the identity)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity for unsigned ABS)
#define GB_OP(z, x) \
    z = x ;

// casting (same type; no conversion occurs)
#define GB_CAST(z, aij) \
    uint64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// NOTE(review): this op IS identity with no typecast, yet the generator
// emitted 0, so the GB_memcpy fast path below is never taken -- presumably
// a deliberate generator decision; confirm against the code generator.
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Copies the anz entries of Ax into Cx (ABS on uint64 is identity),
// honoring the bitmap Ab when A is a bitmap matrix.  Returns GrB_NO_VALUE
// when this specialized kernel is compiled out (GB_DISABLE).
GrB_Info GB (_unop_apply__abs_uint64_uint64)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    { 
        // dense/sparse/hypersparse case: apply to every entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    { 
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            if (!Ab [p]) continue ;     // skip entries not present in A
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transposes A while applying the identity op, using the shared template
// in GB_unop_transpose.c.
GrB_Info GB (_unop_tran__abs_uint64_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
x_solve.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is an OpenMP C version of the NPB BT code. This OpenMP  //
//  C version is developed by the Center for Manycore Programming at Seoul //
//  National University and derived from the OpenMP Fortran versions in    //
//  "NPB3.3-OMP" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this OpenMP C version to              //
//  cmp@aces.snu.ac.kr                                                     //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header.h"
#include "work_lhs.h"
#include "timers.h"

//---------------------------------------------------------------------
//
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
// //--------------------------------------------------------------------- void x_solve() { int i, j, k, m, n, isize; isize = PROBLEM_SIZE-1; double pivot3, coeff3; double pivot2, coeff2; double pivot1, coeff1; //--------------------------------------------------------------------- // This function computes the left hand side in the xi-direction //--------------------------------------------------------------------- //threadprivate() variables simplified (no duplicates) //work_lhs.h: #pragma omp threadprivate(fjac,njac,lhs,tmp1,tmp2,tmp3) //header.h:#pragma omp threadprivate(cuf,q,ue,buf) // fjac[][][] and njac[][][] need to be privatized with private() to enable // parallelization. The first i loop writes in the two arrays and then these // arrays are read later. // The arrays u[][][], square[][][], rho_i[][][] and qs[][][] are live-in arrays. They are // read in the first i loop. // lhs[][][] needs also privatization with openmp private(), it is accessed a // write access in a loop and the read in a following loop (these two loops are // a part of the same SCC). lhs[][][] is used all over the program as a // temporary scalar (do calculations and store them in lhs[][][] then in the // following loop use these calculations). A full fusion of i loops can enable // contraction to reduce the dimensions of lhs[][][][] from 4D to 3D, because // lhs[][][][] is always used as lhs[i][][][]. Each i loop write in // lhs[i][*][*][*] and the following loops read from lhs[i][*][*][*], so if we // fuse the i loops we will not need the i dimension since the fused loop will // write in lhs[*][*][*] and directly read the lhs[*][*][*] in taht iteration. // The code uses i loops because originally the loops were a part of different // functions (modularity), now that inlining is applied, we can fuse the loops // and contract lhs[][][][] into lhs[][][]. I didn't verify for the rest of // the arrays and dependences whether they prevent loop fusion or not. 
// rhs[k][][][] does not need privatization, it is the array that holds the result // of the K loop. Different k iterations write in different places in the // array. //#pragma omp parallel for default(shared) shared(isize) private(i,j,k,m,n) #pragma scop for (k = 1; k <= PROBLEM_SIZE-2; k++) { for (j = 1; j <= PROBLEM_SIZE-2; j++) { for (i = 0; i <= isize; i++) { tmp1 = rho_i[k][j][i]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; fjac[i][0][0] = 0.0; fjac[i][1][0] = 1.0; fjac[i][2][0] = 0.0; fjac[i][3][0] = 0.0; fjac[i][4][0] = 0.0; fjac[i][0][1] = -(u[k][j][i][1] * tmp2 * u[k][j][i][1]) + c2 * qs[k][j][i]; fjac[i][1][1] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] ); fjac[i][2][1] = - c2 * ( u[k][j][i][2] * tmp1 ); fjac[i][3][1] = - c2 * ( u[k][j][i][3] * tmp1 ); fjac[i][4][1] = c2; fjac[i][0][2] = - ( u[k][j][i][1]*u[k][j][i][2] ) * tmp2; fjac[i][1][2] = u[k][j][i][2] * tmp1; fjac[i][2][2] = u[k][j][i][1] * tmp1; fjac[i][3][2] = 0.0; fjac[i][4][2] = 0.0; fjac[i][0][3] = - ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2; fjac[i][1][3] = u[k][j][i][3] * tmp1; fjac[i][2][3] = 0.0; fjac[i][3][3] = u[k][j][i][1] * tmp1; fjac[i][4][3] = 0.0; fjac[i][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] ) * ( u[k][j][i][1] * tmp2 ); fjac[i][1][4] = c1 * u[k][j][i][4] * tmp1 - c2 * ( u[k][j][i][1]*u[k][j][i][1] * tmp2 + qs[k][j][i] ); fjac[i][2][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][1] ) * tmp2; fjac[i][3][4] = - c2 * ( u[k][j][i][3]*u[k][j][i][1] ) * tmp2; fjac[i][4][4] = c1 * ( u[k][j][i][1] * tmp1 ); njac[i][0][0] = 0.0; njac[i][1][0] = 0.0; njac[i][2][0] = 0.0; njac[i][3][0] = 0.0; njac[i][4][0] = 0.0; njac[i][0][1] = - con43 * c3c4 * tmp2 * u[k][j][i][1]; njac[i][1][1] = con43 * c3c4 * tmp1; njac[i][2][1] = 0.0; njac[i][3][1] = 0.0; njac[i][4][1] = 0.0; njac[i][0][2] = - c3c4 * tmp2 * u[k][j][i][2]; njac[i][1][2] = 0.0; njac[i][2][2] = c3c4 * tmp1; njac[i][3][2] = 0.0; njac[i][4][2] = 0.0; njac[i][0][3] = - c3c4 * tmp2 * u[k][j][i][3]; njac[i][1][3] = 0.0; njac[i][2][3] 
= 0.0; njac[i][3][3] = c3c4 * tmp1; njac[i][4][3] = 0.0; njac[i][0][4] = - ( con43 * c3c4 - c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1]) - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2]) - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3]) - c1345 * tmp2 * u[k][j][i][4]; njac[i][1][4] = ( con43 * c3c4 - c1345 ) * tmp2 * u[k][j][i][1]; njac[i][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2]; njac[i][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3]; njac[i][4][4] = ( c1345 ) * tmp1; } // now jacobians set, so form left hand side in x direction //--------------------------------------------------------------------- // lhsinit(lhs, isize); // void lhsinit(double lhs[][3][5][5], int ni) //--------------------------------------------------------------------- for (n = 0; n < 5; n++) { for (m = 0; m < 5; m++) { lhs[0][0][n][m] = 0.0; lhs[0][1][n][m] = 0.0; lhs[0][2][n][m] = 0.0; } lhs[0][1][n][n] = 1.0; } for (n = 0; n < 5; n++) { for (m = 0; m < 5; m++) { lhs[isize][0][n][m] = 0.0; lhs[isize][1][n][m] = 0.0; lhs[isize][2][n][m] = 0.0; } lhs[isize][1][n][n] = 1.0; } for (i = 1; i <= isize-1; i++) { tmp1 = dt * tx1; tmp2 = dt * tx2; lhs[i][AA][0][0] = - tmp2 * fjac[i-1][0][0] - tmp1 * njac[i-1][0][0] - tmp1 * dx1; lhs[i][AA][1][0] = - tmp2 * fjac[i-1][1][0] - tmp1 * njac[i-1][1][0]; lhs[i][AA][2][0] = - tmp2 * fjac[i-1][2][0] - tmp1 * njac[i-1][2][0]; lhs[i][AA][3][0] = - tmp2 * fjac[i-1][3][0] - tmp1 * njac[i-1][3][0]; lhs[i][AA][4][0] = - tmp2 * fjac[i-1][4][0] - tmp1 * njac[i-1][4][0]; lhs[i][AA][0][1] = - tmp2 * fjac[i-1][0][1] - tmp1 * njac[i-1][0][1]; lhs[i][AA][1][1] = - tmp2 * fjac[i-1][1][1] - tmp1 * njac[i-1][1][1] - tmp1 * dx2; lhs[i][AA][2][1] = - tmp2 * fjac[i-1][2][1] - tmp1 * njac[i-1][2][1]; lhs[i][AA][3][1] = - tmp2 * fjac[i-1][3][1] - tmp1 * njac[i-1][3][1]; lhs[i][AA][4][1] = - tmp2 * fjac[i-1][4][1] - tmp1 * njac[i-1][4][1]; lhs[i][AA][0][2] = - tmp2 * fjac[i-1][0][2] - tmp1 * njac[i-1][0][2]; lhs[i][AA][1][2] = - tmp2 * fjac[i-1][1][2] - 
tmp1 * njac[i-1][1][2]; lhs[i][AA][2][2] = - tmp2 * fjac[i-1][2][2] - tmp1 * njac[i-1][2][2] - tmp1 * dx3; lhs[i][AA][3][2] = - tmp2 * fjac[i-1][3][2] - tmp1 * njac[i-1][3][2]; lhs[i][AA][4][2] = - tmp2 * fjac[i-1][4][2] - tmp1 * njac[i-1][4][2]; lhs[i][AA][0][3] = - tmp2 * fjac[i-1][0][3] - tmp1 * njac[i-1][0][3]; lhs[i][AA][1][3] = - tmp2 * fjac[i-1][1][3] - tmp1 * njac[i-1][1][3]; lhs[i][AA][2][3] = - tmp2 * fjac[i-1][2][3] - tmp1 * njac[i-1][2][3]; lhs[i][AA][3][3] = - tmp2 * fjac[i-1][3][3] - tmp1 * njac[i-1][3][3] - tmp1 * dx4; lhs[i][AA][4][3] = - tmp2 * fjac[i-1][4][3] - tmp1 * njac[i-1][4][3]; lhs[i][AA][0][4] = - tmp2 * fjac[i-1][0][4] - tmp1 * njac[i-1][0][4]; lhs[i][AA][1][4] = - tmp2 * fjac[i-1][1][4] - tmp1 * njac[i-1][1][4]; lhs[i][AA][2][4] = - tmp2 * fjac[i-1][2][4] - tmp1 * njac[i-1][2][4]; lhs[i][AA][3][4] = - tmp2 * fjac[i-1][3][4] - tmp1 * njac[i-1][3][4]; lhs[i][AA][4][4] = - tmp2 * fjac[i-1][4][4] - tmp1 * njac[i-1][4][4] - tmp1 * dx5; lhs[i][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[i][0][0] + tmp1 * 2.0 * dx1; lhs[i][BB][1][0] = tmp1 * 2.0 * njac[i][1][0]; lhs[i][BB][2][0] = tmp1 * 2.0 * njac[i][2][0]; lhs[i][BB][3][0] = tmp1 * 2.0 * njac[i][3][0]; lhs[i][BB][4][0] = tmp1 * 2.0 * njac[i][4][0]; lhs[i][BB][0][1] = tmp1 * 2.0 * njac[i][0][1]; lhs[i][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[i][1][1] + tmp1 * 2.0 * dx2; lhs[i][BB][2][1] = tmp1 * 2.0 * njac[i][2][1]; lhs[i][BB][3][1] = tmp1 * 2.0 * njac[i][3][1]; lhs[i][BB][4][1] = tmp1 * 2.0 * njac[i][4][1]; lhs[i][BB][0][2] = tmp1 * 2.0 * njac[i][0][2]; lhs[i][BB][1][2] = tmp1 * 2.0 * njac[i][1][2]; lhs[i][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[i][2][2] + tmp1 * 2.0 * dx3; lhs[i][BB][3][2] = tmp1 * 2.0 * njac[i][3][2]; lhs[i][BB][4][2] = tmp1 * 2.0 * njac[i][4][2]; lhs[i][BB][0][3] = tmp1 * 2.0 * njac[i][0][3]; lhs[i][BB][1][3] = tmp1 * 2.0 * njac[i][1][3]; lhs[i][BB][2][3] = tmp1 * 2.0 * njac[i][2][3]; lhs[i][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[i][3][3] + tmp1 * 2.0 * dx4; lhs[i][BB][4][3] = tmp1 * 2.0 * 
njac[i][4][3]; lhs[i][BB][0][4] = tmp1 * 2.0 * njac[i][0][4]; lhs[i][BB][1][4] = tmp1 * 2.0 * njac[i][1][4]; lhs[i][BB][2][4] = tmp1 * 2.0 * njac[i][2][4]; lhs[i][BB][3][4] = tmp1 * 2.0 * njac[i][3][4]; lhs[i][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[i][4][4] + tmp1 * 2.0 * dx5; lhs[i][CC][0][0] = tmp2 * fjac[i+1][0][0] - tmp1 * njac[i+1][0][0] - tmp1 * dx1; lhs[i][CC][1][0] = tmp2 * fjac[i+1][1][0] - tmp1 * njac[i+1][1][0]; lhs[i][CC][2][0] = tmp2 * fjac[i+1][2][0] - tmp1 * njac[i+1][2][0]; lhs[i][CC][3][0] = tmp2 * fjac[i+1][3][0] - tmp1 * njac[i+1][3][0]; lhs[i][CC][4][0] = tmp2 * fjac[i+1][4][0] - tmp1 * njac[i+1][4][0]; lhs[i][CC][0][1] = tmp2 * fjac[i+1][0][1] - tmp1 * njac[i+1][0][1]; lhs[i][CC][1][1] = tmp2 * fjac[i+1][1][1] - tmp1 * njac[i+1][1][1] - tmp1 * dx2; lhs[i][CC][2][1] = tmp2 * fjac[i+1][2][1] - tmp1 * njac[i+1][2][1]; lhs[i][CC][3][1] = tmp2 * fjac[i+1][3][1] - tmp1 * njac[i+1][3][1]; lhs[i][CC][4][1] = tmp2 * fjac[i+1][4][1] - tmp1 * njac[i+1][4][1]; lhs[i][CC][0][2] = tmp2 * fjac[i+1][0][2] - tmp1 * njac[i+1][0][2]; lhs[i][CC][1][2] = tmp2 * fjac[i+1][1][2] - tmp1 * njac[i+1][1][2]; lhs[i][CC][2][2] = tmp2 * fjac[i+1][2][2] - tmp1 * njac[i+1][2][2] - tmp1 * dx3; lhs[i][CC][3][2] = tmp2 * fjac[i+1][3][2] - tmp1 * njac[i+1][3][2]; lhs[i][CC][4][2] = tmp2 * fjac[i+1][4][2] - tmp1 * njac[i+1][4][2]; lhs[i][CC][0][3] = tmp2 * fjac[i+1][0][3] - tmp1 * njac[i+1][0][3]; lhs[i][CC][1][3] = tmp2 * fjac[i+1][1][3] - tmp1 * njac[i+1][1][3]; lhs[i][CC][2][3] = tmp2 * fjac[i+1][2][3] - tmp1 * njac[i+1][2][3]; lhs[i][CC][3][3] = tmp2 * fjac[i+1][3][3] - tmp1 * njac[i+1][3][3] - tmp1 * dx4; lhs[i][CC][4][3] = tmp2 * fjac[i+1][4][3] - tmp1 * njac[i+1][4][3]; lhs[i][CC][0][4] = tmp2 * fjac[i+1][0][4] - tmp1 * njac[i+1][0][4]; lhs[i][CC][1][4] = tmp2 * fjac[i+1][1][4] - tmp1 * njac[i+1][1][4]; lhs[i][CC][2][4] = tmp2 * fjac[i+1][2][4] - tmp1 * njac[i+1][2][4]; lhs[i][CC][3][4] = tmp2 * fjac[i+1][3][4] - tmp1 * njac[i+1][3][4]; lhs[i][CC][4][4] = tmp2 * 
fjac[i+1][4][4] - tmp1 * njac[i+1][4][4] - tmp1 * dx5; } //--------------------------------------------------------------------- // performs guaussian elimination on this cell. // // assumes that unpacking routines for non-first cells // preload C' and rhs' from previous cell. // // assumed send happens outside this routine, but that // c'(IMAX) and rhs'(IMAX) will be sent to next cell // // outer most do loops - sweeping in i direction // // multiply c[k][j][0] by b_inverse and copy back to c // multiply rhs(0) by b_inverse(0) and copy to rhs //--------------------------------------------------------------------- // binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][j][0] ); // void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) { pivot1 = 1.00/lhs[0][BB][0][0]; lhs[0][BB][1][0] = lhs[0][BB][1][0]*pivot1; lhs[0][BB][2][0] = lhs[0][BB][2][0]*pivot1; lhs[0][BB][3][0] = lhs[0][BB][3][0]*pivot1; lhs[0][BB][4][0] = lhs[0][BB][4][0]*pivot1; lhs[0][CC][0][0] = lhs[0][CC][0][0]*pivot1; lhs[0][CC][1][0] = lhs[0][CC][1][0]*pivot1; lhs[0][CC][2][0] = lhs[0][CC][2][0]*pivot1; lhs[0][CC][3][0] = lhs[0][CC][3][0]*pivot1; lhs[0][CC][4][0] = lhs[0][CC][4][0]*pivot1; rhs[k][j][0][0] = rhs[k][j][0][0] *pivot1; coeff1 = lhs[0][BB][0][1]; lhs[0][BB][1][1]= lhs[0][BB][1][1] - coeff1*lhs[0][BB][1][0]; lhs[0][BB][2][1]= lhs[0][BB][2][1] - coeff1*lhs[0][BB][2][0]; lhs[0][BB][3][1]= lhs[0][BB][3][1] - coeff1*lhs[0][BB][3][0]; lhs[0][BB][4][1]= lhs[0][BB][4][1] - coeff1*lhs[0][BB][4][0]; lhs[0][CC][0][1] = lhs[0][CC][0][1] - coeff1*lhs[0][CC][0][0]; lhs[0][CC][1][1] = lhs[0][CC][1][1] - coeff1*lhs[0][CC][1][0]; lhs[0][CC][2][1] = lhs[0][CC][2][1] - coeff1*lhs[0][CC][2][0]; lhs[0][CC][3][1] = lhs[0][CC][3][1] - coeff1*lhs[0][CC][3][0]; lhs[0][CC][4][1] = lhs[0][CC][4][1] - coeff1*lhs[0][CC][4][0]; rhs[k][j][0][1] = rhs[k][j][0][1] - coeff1*rhs[k][j][0][0]; coeff1 = lhs[0][BB][0][2]; lhs[0][BB][1][2]= lhs[0][BB][1][2] - coeff1*lhs[0][BB][1][0]; lhs[0][BB][2][2]= lhs[0][BB][2][2] - 
coeff1*lhs[0][BB][2][0]; lhs[0][BB][3][2]= lhs[0][BB][3][2] - coeff1*lhs[0][BB][3][0]; lhs[0][BB][4][2]= lhs[0][BB][4][2] - coeff1*lhs[0][BB][4][0]; lhs[0][CC][0][2] = lhs[0][CC][0][2] - coeff1*lhs[0][CC][0][0]; lhs[0][CC][1][2] = lhs[0][CC][1][2] - coeff1*lhs[0][CC][1][0]; lhs[0][CC][2][2] = lhs[0][CC][2][2] - coeff1*lhs[0][CC][2][0]; lhs[0][CC][3][2] = lhs[0][CC][3][2] - coeff1*lhs[0][CC][3][0]; lhs[0][CC][4][2] = lhs[0][CC][4][2] - coeff1*lhs[0][CC][4][0]; rhs[k][j][0][2] = rhs[k][j][0][2] - coeff1*rhs[k][j][0][0]; coeff1 = lhs[0][BB][0][3]; lhs[0][BB][1][3]= lhs[0][BB][1][3] - coeff1*lhs[0][BB][1][0]; lhs[0][BB][2][3]= lhs[0][BB][2][3] - coeff1*lhs[0][BB][2][0]; lhs[0][BB][3][3]= lhs[0][BB][3][3] - coeff1*lhs[0][BB][3][0]; lhs[0][BB][4][3]= lhs[0][BB][4][3] - coeff1*lhs[0][BB][4][0]; lhs[0][CC][0][3] = lhs[0][CC][0][3] - coeff1*lhs[0][CC][0][0]; lhs[0][CC][1][3] = lhs[0][CC][1][3] - coeff1*lhs[0][CC][1][0]; lhs[0][CC][2][3] = lhs[0][CC][2][3] - coeff1*lhs[0][CC][2][0]; lhs[0][CC][3][3] = lhs[0][CC][3][3] - coeff1*lhs[0][CC][3][0]; lhs[0][CC][4][3] = lhs[0][CC][4][3] - coeff1*lhs[0][CC][4][0]; rhs[k][j][0][3] = rhs[k][j][0][3] - coeff1*rhs[k][j][0][0]; coeff1 = lhs[0][BB][0][4]; lhs[0][BB][1][4]= lhs[0][BB][1][4] - coeff1*lhs[0][BB][1][0]; lhs[0][BB][2][4]= lhs[0][BB][2][4] - coeff1*lhs[0][BB][2][0]; lhs[0][BB][3][4]= lhs[0][BB][3][4] - coeff1*lhs[0][BB][3][0]; lhs[0][BB][4][4]= lhs[0][BB][4][4] - coeff1*lhs[0][BB][4][0]; lhs[0][CC][0][4] = lhs[0][CC][0][4] - coeff1*lhs[0][CC][0][0]; lhs[0][CC][1][4] = lhs[0][CC][1][4] - coeff1*lhs[0][CC][1][0]; lhs[0][CC][2][4] = lhs[0][CC][2][4] - coeff1*lhs[0][CC][2][0]; lhs[0][CC][3][4] = lhs[0][CC][3][4] - coeff1*lhs[0][CC][3][0]; lhs[0][CC][4][4] = lhs[0][CC][4][4] - coeff1*lhs[0][CC][4][0]; rhs[k][j][0][4] = rhs[k][j][0][4] - coeff1*rhs[k][j][0][0]; pivot1 = 1.00/lhs[0][BB][1][1]; lhs[0][BB][2][1] = lhs[0][BB][2][1]*pivot1; lhs[0][BB][3][1] = lhs[0][BB][3][1]*pivot1; lhs[0][BB][4][1] = lhs[0][BB][4][1]*pivot1; 
lhs[0][CC][0][1] = lhs[0][CC][0][1]*pivot1; lhs[0][CC][1][1] = lhs[0][CC][1][1]*pivot1; lhs[0][CC][2][1] = lhs[0][CC][2][1]*pivot1; lhs[0][CC][3][1] = lhs[0][CC][3][1]*pivot1; lhs[0][CC][4][1] = lhs[0][CC][4][1]*pivot1; rhs[k][j][0][1] = rhs[k][j][0][1] *pivot1; coeff1 = lhs[0][BB][1][0]; lhs[0][BB][2][0]= lhs[0][BB][2][0] - coeff1*lhs[0][BB][2][1]; lhs[0][BB][3][0]= lhs[0][BB][3][0] - coeff1*lhs[0][BB][3][1]; lhs[0][BB][4][0]= lhs[0][BB][4][0] - coeff1*lhs[0][BB][4][1]; lhs[0][CC][0][0] = lhs[0][CC][0][0] - coeff1*lhs[0][CC][0][1]; lhs[0][CC][1][0] = lhs[0][CC][1][0] - coeff1*lhs[0][CC][1][1]; lhs[0][CC][2][0] = lhs[0][CC][2][0] - coeff1*lhs[0][CC][2][1]; lhs[0][CC][3][0] = lhs[0][CC][3][0] - coeff1*lhs[0][CC][3][1]; lhs[0][CC][4][0] = lhs[0][CC][4][0] - coeff1*lhs[0][CC][4][1]; rhs[k][j][0][0] = rhs[k][j][0][0] - coeff1*rhs[k][j][0][1]; coeff1 = lhs[0][BB][1][2]; lhs[0][BB][2][2]= lhs[0][BB][2][2] - coeff1*lhs[0][BB][2][1]; lhs[0][BB][3][2]= lhs[0][BB][3][2] - coeff1*lhs[0][BB][3][1]; lhs[0][BB][4][2]= lhs[0][BB][4][2] - coeff1*lhs[0][BB][4][1]; lhs[0][CC][0][2] = lhs[0][CC][0][2] - coeff1*lhs[0][CC][0][1]; lhs[0][CC][1][2] = lhs[0][CC][1][2] - coeff1*lhs[0][CC][1][1]; lhs[0][CC][2][2] = lhs[0][CC][2][2] - coeff1*lhs[0][CC][2][1]; lhs[0][CC][3][2] = lhs[0][CC][3][2] - coeff1*lhs[0][CC][3][1]; lhs[0][CC][4][2] = lhs[0][CC][4][2] - coeff1*lhs[0][CC][4][1]; rhs[k][j][0][2] = rhs[k][j][0][2] - coeff1*rhs[k][j][0][1]; coeff1 = lhs[0][BB][1][3]; lhs[0][BB][2][3]= lhs[0][BB][2][3] - coeff1*lhs[0][BB][2][1]; lhs[0][BB][3][3]= lhs[0][BB][3][3] - coeff1*lhs[0][BB][3][1]; lhs[0][BB][4][3]= lhs[0][BB][4][3] - coeff1*lhs[0][BB][4][1]; lhs[0][CC][0][3] = lhs[0][CC][0][3] - coeff1*lhs[0][CC][0][1]; lhs[0][CC][1][3] = lhs[0][CC][1][3] - coeff1*lhs[0][CC][1][1]; lhs[0][CC][2][3] = lhs[0][CC][2][3] - coeff1*lhs[0][CC][2][1]; lhs[0][CC][3][3] = lhs[0][CC][3][3] - coeff1*lhs[0][CC][3][1]; lhs[0][CC][4][3] = lhs[0][CC][4][3] - coeff1*lhs[0][CC][4][1]; rhs[k][j][0][3] = 
rhs[k][j][0][3] - coeff1*rhs[k][j][0][1]; coeff1 = lhs[0][BB][1][4]; lhs[0][BB][2][4]= lhs[0][BB][2][4] - coeff1*lhs[0][BB][2][1]; lhs[0][BB][3][4]= lhs[0][BB][3][4] - coeff1*lhs[0][BB][3][1]; lhs[0][BB][4][4]= lhs[0][BB][4][4] - coeff1*lhs[0][BB][4][1]; lhs[0][CC][0][4] = lhs[0][CC][0][4] - coeff1*lhs[0][CC][0][1]; lhs[0][CC][1][4] = lhs[0][CC][1][4] - coeff1*lhs[0][CC][1][1]; lhs[0][CC][2][4] = lhs[0][CC][2][4] - coeff1*lhs[0][CC][2][1]; lhs[0][CC][3][4] = lhs[0][CC][3][4] - coeff1*lhs[0][CC][3][1]; lhs[0][CC][4][4] = lhs[0][CC][4][4] - coeff1*lhs[0][CC][4][1]; rhs[k][j][0][4] = rhs[k][j][0][4] - coeff1*rhs[k][j][0][1]; pivot1 = 1.00/lhs[0][BB][2][2]; lhs[0][BB][3][2] = lhs[0][BB][3][2]*pivot1; lhs[0][BB][4][2] = lhs[0][BB][4][2]*pivot1; lhs[0][CC][0][2] = lhs[0][CC][0][2]*pivot1; lhs[0][CC][1][2] = lhs[0][CC][1][2]*pivot1; lhs[0][CC][2][2] = lhs[0][CC][2][2]*pivot1; lhs[0][CC][3][2] = lhs[0][CC][3][2]*pivot1; lhs[0][CC][4][2] = lhs[0][CC][4][2]*pivot1; rhs[k][j][0][2] = rhs[k][j][0][2] *pivot1; coeff1 = lhs[0][BB][2][0]; lhs[0][BB][3][0]= lhs[0][BB][3][0] - coeff1*lhs[0][BB][3][2]; lhs[0][BB][4][0]= lhs[0][BB][4][0] - coeff1*lhs[0][BB][4][2]; lhs[0][CC][0][0] = lhs[0][CC][0][0] - coeff1*lhs[0][CC][0][2]; lhs[0][CC][1][0] = lhs[0][CC][1][0] - coeff1*lhs[0][CC][1][2]; lhs[0][CC][2][0] = lhs[0][CC][2][0] - coeff1*lhs[0][CC][2][2]; lhs[0][CC][3][0] = lhs[0][CC][3][0] - coeff1*lhs[0][CC][3][2]; lhs[0][CC][4][0] = lhs[0][CC][4][0] - coeff1*lhs[0][CC][4][2]; rhs[k][j][0][0] = rhs[k][j][0][0] - coeff1*rhs[k][j][0][2]; coeff1 = lhs[0][BB][2][1]; lhs[0][BB][3][1]= lhs[0][BB][3][1] - coeff1*lhs[0][BB][3][2]; lhs[0][BB][4][1]= lhs[0][BB][4][1] - coeff1*lhs[0][BB][4][2]; lhs[0][CC][0][1] = lhs[0][CC][0][1] - coeff1*lhs[0][CC][0][2]; lhs[0][CC][1][1] = lhs[0][CC][1][1] - coeff1*lhs[0][CC][1][2]; lhs[0][CC][2][1] = lhs[0][CC][2][1] - coeff1*lhs[0][CC][2][2]; lhs[0][CC][3][1] = lhs[0][CC][3][1] - coeff1*lhs[0][CC][3][2]; lhs[0][CC][4][1] = lhs[0][CC][4][1] - 
coeff1*lhs[0][CC][4][2]; rhs[k][j][0][1] = rhs[k][j][0][1] - coeff1*rhs[k][j][0][2]; coeff1 = lhs[0][BB][2][3]; lhs[0][BB][3][3]= lhs[0][BB][3][3] - coeff1*lhs[0][BB][3][2]; lhs[0][BB][4][3]= lhs[0][BB][4][3] - coeff1*lhs[0][BB][4][2]; lhs[0][CC][0][3] = lhs[0][CC][0][3] - coeff1*lhs[0][CC][0][2]; lhs[0][CC][1][3] = lhs[0][CC][1][3] - coeff1*lhs[0][CC][1][2]; lhs[0][CC][2][3] = lhs[0][CC][2][3] - coeff1*lhs[0][CC][2][2]; lhs[0][CC][3][3] = lhs[0][CC][3][3] - coeff1*lhs[0][CC][3][2]; lhs[0][CC][4][3] = lhs[0][CC][4][3] - coeff1*lhs[0][CC][4][2]; rhs[k][j][0][3] = rhs[k][j][0][3] - coeff1*rhs[k][j][0][2]; coeff1 = lhs[0][BB][2][4]; lhs[0][BB][3][4]= lhs[0][BB][3][4] - coeff1*lhs[0][BB][3][2]; lhs[0][BB][4][4]= lhs[0][BB][4][4] - coeff1*lhs[0][BB][4][2]; lhs[0][CC][0][4] = lhs[0][CC][0][4] - coeff1*lhs[0][CC][0][2]; lhs[0][CC][1][4] = lhs[0][CC][1][4] - coeff1*lhs[0][CC][1][2]; lhs[0][CC][2][4] = lhs[0][CC][2][4] - coeff1*lhs[0][CC][2][2]; lhs[0][CC][3][4] = lhs[0][CC][3][4] - coeff1*lhs[0][CC][3][2]; lhs[0][CC][4][4] = lhs[0][CC][4][4] - coeff1*lhs[0][CC][4][2]; rhs[k][j][0][4] = rhs[k][j][0][4] - coeff1*rhs[k][j][0][2]; pivot1 = 1.00/lhs[0][BB][3][3]; lhs[0][BB][4][3] = lhs[0][BB][4][3]*pivot1; lhs[0][CC][0][3] = lhs[0][CC][0][3]*pivot1; lhs[0][CC][1][3] = lhs[0][CC][1][3]*pivot1; lhs[0][CC][2][3] = lhs[0][CC][2][3]*pivot1; lhs[0][CC][3][3] = lhs[0][CC][3][3]*pivot1; lhs[0][CC][4][3] = lhs[0][CC][4][3]*pivot1; rhs[k][j][0][3] = rhs[k][j][0][3] *pivot1; coeff1 = lhs[0][BB][3][0]; lhs[0][BB][4][0]= lhs[0][BB][4][0] - coeff1*lhs[0][BB][4][3]; lhs[0][CC][0][0] = lhs[0][CC][0][0] - coeff1*lhs[0][CC][0][3]; lhs[0][CC][1][0] = lhs[0][CC][1][0] - coeff1*lhs[0][CC][1][3]; lhs[0][CC][2][0] = lhs[0][CC][2][0] - coeff1*lhs[0][CC][2][3]; lhs[0][CC][3][0] = lhs[0][CC][3][0] - coeff1*lhs[0][CC][3][3]; lhs[0][CC][4][0] = lhs[0][CC][4][0] - coeff1*lhs[0][CC][4][3]; rhs[k][j][0][0] = rhs[k][j][0][0] - coeff1*rhs[k][j][0][3]; coeff1 = lhs[0][BB][3][1]; lhs[0][BB][4][1]= 
lhs[0][BB][4][1] - coeff1*lhs[0][BB][4][3]; lhs[0][CC][0][1] = lhs[0][CC][0][1] - coeff1*lhs[0][CC][0][3]; lhs[0][CC][1][1] = lhs[0][CC][1][1] - coeff1*lhs[0][CC][1][3]; lhs[0][CC][2][1] = lhs[0][CC][2][1] - coeff1*lhs[0][CC][2][3]; lhs[0][CC][3][1] = lhs[0][CC][3][1] - coeff1*lhs[0][CC][3][3]; lhs[0][CC][4][1] = lhs[0][CC][4][1] - coeff1*lhs[0][CC][4][3]; rhs[k][j][0][1] = rhs[k][j][0][1] - coeff1*rhs[k][j][0][3]; coeff1 = lhs[0][BB][3][2]; lhs[0][BB][4][2]= lhs[0][BB][4][2] - coeff1*lhs[0][BB][4][3]; lhs[0][CC][0][2] = lhs[0][CC][0][2] - coeff1*lhs[0][CC][0][3]; lhs[0][CC][1][2] = lhs[0][CC][1][2] - coeff1*lhs[0][CC][1][3]; lhs[0][CC][2][2] = lhs[0][CC][2][2] - coeff1*lhs[0][CC][2][3]; lhs[0][CC][3][2] = lhs[0][CC][3][2] - coeff1*lhs[0][CC][3][3]; lhs[0][CC][4][2] = lhs[0][CC][4][2] - coeff1*lhs[0][CC][4][3]; rhs[k][j][0][2] = rhs[k][j][0][2] - coeff1*rhs[k][j][0][3]; coeff1 = lhs[0][BB][3][4]; lhs[0][BB][4][4]= lhs[0][BB][4][4] - coeff1*lhs[0][BB][4][3]; lhs[0][CC][0][4] = lhs[0][CC][0][4] - coeff1*lhs[0][CC][0][3]; lhs[0][CC][1][4] = lhs[0][CC][1][4] - coeff1*lhs[0][CC][1][3]; lhs[0][CC][2][4] = lhs[0][CC][2][4] - coeff1*lhs[0][CC][2][3]; lhs[0][CC][3][4] = lhs[0][CC][3][4] - coeff1*lhs[0][CC][3][3]; lhs[0][CC][4][4] = lhs[0][CC][4][4] - coeff1*lhs[0][CC][4][3]; rhs[k][j][0][4] = rhs[k][j][0][4] - coeff1*rhs[k][j][0][3]; pivot1 = 1.00/lhs[0][BB][4][4]; lhs[0][CC][0][4] = lhs[0][CC][0][4]*pivot1; lhs[0][CC][1][4] = lhs[0][CC][1][4]*pivot1; lhs[0][CC][2][4] = lhs[0][CC][2][4]*pivot1; lhs[0][CC][3][4] = lhs[0][CC][3][4]*pivot1; lhs[0][CC][4][4] = lhs[0][CC][4][4]*pivot1; rhs[k][j][0][4] = rhs[k][j][0][4] *pivot1; coeff1 = lhs[0][BB][4][0]; lhs[0][CC][0][0] = lhs[0][CC][0][0] - coeff1*lhs[0][CC][0][4]; lhs[0][CC][1][0] = lhs[0][CC][1][0] - coeff1*lhs[0][CC][1][4]; lhs[0][CC][2][0] = lhs[0][CC][2][0] - coeff1*lhs[0][CC][2][4]; lhs[0][CC][3][0] = lhs[0][CC][3][0] - coeff1*lhs[0][CC][3][4]; lhs[0][CC][4][0] = lhs[0][CC][4][0] - coeff1*lhs[0][CC][4][4]; rhs[k][j][0][0] 
= rhs[k][j][0][0] - coeff1*rhs[k][j][0][4]; coeff1 = lhs[0][BB][4][1]; lhs[0][CC][0][1] = lhs[0][CC][0][1] - coeff1*lhs[0][CC][0][4]; lhs[0][CC][1][1] = lhs[0][CC][1][1] - coeff1*lhs[0][CC][1][4]; lhs[0][CC][2][1] = lhs[0][CC][2][1] - coeff1*lhs[0][CC][2][4]; lhs[0][CC][3][1] = lhs[0][CC][3][1] - coeff1*lhs[0][CC][3][4]; lhs[0][CC][4][1] = lhs[0][CC][4][1] - coeff1*lhs[0][CC][4][4]; rhs[k][j][0][1] = rhs[k][j][0][1] - coeff1*rhs[k][j][0][4]; coeff1 = lhs[0][BB][4][2]; lhs[0][CC][0][2] = lhs[0][CC][0][2] - coeff1*lhs[0][CC][0][4]; lhs[0][CC][1][2] = lhs[0][CC][1][2] - coeff1*lhs[0][CC][1][4]; lhs[0][CC][2][2] = lhs[0][CC][2][2] - coeff1*lhs[0][CC][2][4]; lhs[0][CC][3][2] = lhs[0][CC][3][2] - coeff1*lhs[0][CC][3][4]; lhs[0][CC][4][2] = lhs[0][CC][4][2] - coeff1*lhs[0][CC][4][4]; rhs[k][j][0][2] = rhs[k][j][0][2] - coeff1*rhs[k][j][0][4]; coeff1 = lhs[0][BB][4][3]; lhs[0][CC][0][3] = lhs[0][CC][0][3] - coeff1*lhs[0][CC][0][4]; lhs[0][CC][1][3] = lhs[0][CC][1][3] - coeff1*lhs[0][CC][1][4]; lhs[0][CC][2][3] = lhs[0][CC][2][3] - coeff1*lhs[0][CC][2][4]; lhs[0][CC][3][3] = lhs[0][CC][3][3] - coeff1*lhs[0][CC][3][4]; lhs[0][CC][4][3] = lhs[0][CC][4][3] - coeff1*lhs[0][CC][4][4]; rhs[k][j][0][3] = rhs[k][j][0][3] - coeff1*rhs[k][j][0][4]; } //END of binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][j][0] ); // begin inner most do loop // do all the elements of the cell unless last for (i = 1; i <= isize-1; i++) { //------------------------------------------------------------------- // rhs(i) = rhs(i) - A*rhs(i-1) // // matvec_sub( lhs[i][AA], rhs[k][j][i-1], rhs[k][j][i]); // void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) //------------------------------------------------------------------- { rhs[k][j][i][0] = rhs[k][j][i][0] - lhs[i][AA][0][0]*rhs[k][j][i-1][0] - lhs[i][AA][1][0]*rhs[k][j][i-1][1] - lhs[i][AA][2][0]*rhs[k][j][i-1][2] - lhs[i][AA][3][0]*rhs[k][j][i-1][3] - lhs[i][AA][4][0]*rhs[k][j][i-1][4]; rhs[k][j][i][1] = rhs[k][j][i][1] - 
lhs[i][AA][0][1]*rhs[k][j][i-1][0] - lhs[i][AA][1][1]*rhs[k][j][i-1][1] - lhs[i][AA][2][1]*rhs[k][j][i-1][2] - lhs[i][AA][3][1]*rhs[k][j][i-1][3] - lhs[i][AA][4][1]*rhs[k][j][i-1][4]; rhs[k][j][i][2] = rhs[k][j][i][2] - lhs[i][AA][0][2]*rhs[k][j][i-1][0] - lhs[i][AA][1][2]*rhs[k][j][i-1][1] - lhs[i][AA][2][2]*rhs[k][j][i-1][2] - lhs[i][AA][3][2]*rhs[k][j][i-1][3] - lhs[i][AA][4][2]*rhs[k][j][i-1][4]; rhs[k][j][i][3] = rhs[k][j][i][3] - lhs[i][AA][0][3]*rhs[k][j][i-1][0] - lhs[i][AA][1][3]*rhs[k][j][i-1][1] - lhs[i][AA][2][3]*rhs[k][j][i-1][2] - lhs[i][AA][3][3]*rhs[k][j][i-1][3] - lhs[i][AA][4][3]*rhs[k][j][i-1][4]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhs[i][AA][0][4]*rhs[k][j][i-1][0] - lhs[i][AA][1][4]*rhs[k][j][i-1][1] - lhs[i][AA][2][4]*rhs[k][j][i-1][2] - lhs[i][AA][3][4]*rhs[k][j][i-1][3] - lhs[i][AA][4][4]*rhs[k][j][i-1][4]; } //------------------------------------------------------------------- // B(i) = B(i) - C(i-1)*A(i) // matmul_sub(lhs[i][AA], lhs[i-1][CC], lhs[i][BB]); // void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]) //------------------------------------------------------------------- { lhs[i][BB][0][0] = lhs[i][BB][0][0] - lhs[i][AA][0][0]*lhs[i-1][CC][0][0] - lhs[i][AA][1][0]*lhs[i-1][CC][0][1] - lhs[i][AA][2][0]*lhs[i-1][CC][0][2] - lhs[i][AA][3][0]*lhs[i-1][CC][0][3] - lhs[i][AA][4][0]*lhs[i-1][CC][0][4]; lhs[i][BB][0][1] = lhs[i][BB][0][1] - lhs[i][AA][0][1]*lhs[i-1][CC][0][0] - lhs[i][AA][1][1]*lhs[i-1][CC][0][1] - lhs[i][AA][2][1]*lhs[i-1][CC][0][2] - lhs[i][AA][3][1]*lhs[i-1][CC][0][3] - lhs[i][AA][4][1]*lhs[i-1][CC][0][4]; lhs[i][BB][0][2] = lhs[i][BB][0][2] - lhs[i][AA][0][2]*lhs[i-1][CC][0][0] - lhs[i][AA][1][2]*lhs[i-1][CC][0][1] - lhs[i][AA][2][2]*lhs[i-1][CC][0][2] - lhs[i][AA][3][2]*lhs[i-1][CC][0][3] - lhs[i][AA][4][2]*lhs[i-1][CC][0][4]; lhs[i][BB][0][3] = lhs[i][BB][0][3] - lhs[i][AA][0][3]*lhs[i-1][CC][0][0] - lhs[i][AA][1][3]*lhs[i-1][CC][0][1] - lhs[i][AA][2][3]*lhs[i-1][CC][0][2] - 
lhs[i][AA][3][3]*lhs[i-1][CC][0][3] - lhs[i][AA][4][3]*lhs[i-1][CC][0][4]; lhs[i][BB][0][4] = lhs[i][BB][0][4] - lhs[i][AA][0][4]*lhs[i-1][CC][0][0] - lhs[i][AA][1][4]*lhs[i-1][CC][0][1] - lhs[i][AA][2][4]*lhs[i-1][CC][0][2] - lhs[i][AA][3][4]*lhs[i-1][CC][0][3] - lhs[i][AA][4][4]*lhs[i-1][CC][0][4]; lhs[i][BB][1][0] = lhs[i][BB][1][0] - lhs[i][AA][0][0]*lhs[i-1][CC][1][0] - lhs[i][AA][1][0]*lhs[i-1][CC][1][1] - lhs[i][AA][2][0]*lhs[i-1][CC][1][2] - lhs[i][AA][3][0]*lhs[i-1][CC][1][3] - lhs[i][AA][4][0]*lhs[i-1][CC][1][4]; lhs[i][BB][1][1] = lhs[i][BB][1][1] - lhs[i][AA][0][1]*lhs[i-1][CC][1][0] - lhs[i][AA][1][1]*lhs[i-1][CC][1][1] - lhs[i][AA][2][1]*lhs[i-1][CC][1][2] - lhs[i][AA][3][1]*lhs[i-1][CC][1][3] - lhs[i][AA][4][1]*lhs[i-1][CC][1][4]; lhs[i][BB][1][2] = lhs[i][BB][1][2] - lhs[i][AA][0][2]*lhs[i-1][CC][1][0] - lhs[i][AA][1][2]*lhs[i-1][CC][1][1] - lhs[i][AA][2][2]*lhs[i-1][CC][1][2] - lhs[i][AA][3][2]*lhs[i-1][CC][1][3] - lhs[i][AA][4][2]*lhs[i-1][CC][1][4]; lhs[i][BB][1][3] = lhs[i][BB][1][3] - lhs[i][AA][0][3]*lhs[i-1][CC][1][0] - lhs[i][AA][1][3]*lhs[i-1][CC][1][1] - lhs[i][AA][2][3]*lhs[i-1][CC][1][2] - lhs[i][AA][3][3]*lhs[i-1][CC][1][3] - lhs[i][AA][4][3]*lhs[i-1][CC][1][4]; lhs[i][BB][1][4] = lhs[i][BB][1][4] - lhs[i][AA][0][4]*lhs[i-1][CC][1][0] - lhs[i][AA][1][4]*lhs[i-1][CC][1][1] - lhs[i][AA][2][4]*lhs[i-1][CC][1][2] - lhs[i][AA][3][4]*lhs[i-1][CC][1][3] - lhs[i][AA][4][4]*lhs[i-1][CC][1][4]; lhs[i][BB][2][0] = lhs[i][BB][2][0] - lhs[i][AA][0][0]*lhs[i-1][CC][2][0] - lhs[i][AA][1][0]*lhs[i-1][CC][2][1] - lhs[i][AA][2][0]*lhs[i-1][CC][2][2] - lhs[i][AA][3][0]*lhs[i-1][CC][2][3] - lhs[i][AA][4][0]*lhs[i-1][CC][2][4]; lhs[i][BB][2][1] = lhs[i][BB][2][1] - lhs[i][AA][0][1]*lhs[i-1][CC][2][0] - lhs[i][AA][1][1]*lhs[i-1][CC][2][1] - lhs[i][AA][2][1]*lhs[i-1][CC][2][2] - lhs[i][AA][3][1]*lhs[i-1][CC][2][3] - lhs[i][AA][4][1]*lhs[i-1][CC][2][4]; lhs[i][BB][2][2] = lhs[i][BB][2][2] - lhs[i][AA][0][2]*lhs[i-1][CC][2][0] - 
lhs[i][AA][1][2]*lhs[i-1][CC][2][1] - lhs[i][AA][2][2]*lhs[i-1][CC][2][2] - lhs[i][AA][3][2]*lhs[i-1][CC][2][3] - lhs[i][AA][4][2]*lhs[i-1][CC][2][4]; lhs[i][BB][2][3] = lhs[i][BB][2][3] - lhs[i][AA][0][3]*lhs[i-1][CC][2][0] - lhs[i][AA][1][3]*lhs[i-1][CC][2][1] - lhs[i][AA][2][3]*lhs[i-1][CC][2][2] - lhs[i][AA][3][3]*lhs[i-1][CC][2][3] - lhs[i][AA][4][3]*lhs[i-1][CC][2][4]; lhs[i][BB][2][4] = lhs[i][BB][2][4] - lhs[i][AA][0][4]*lhs[i-1][CC][2][0] - lhs[i][AA][1][4]*lhs[i-1][CC][2][1] - lhs[i][AA][2][4]*lhs[i-1][CC][2][2] - lhs[i][AA][3][4]*lhs[i-1][CC][2][3] - lhs[i][AA][4][4]*lhs[i-1][CC][2][4]; lhs[i][BB][3][0] = lhs[i][BB][3][0] - lhs[i][AA][0][0]*lhs[i-1][CC][3][0] - lhs[i][AA][1][0]*lhs[i-1][CC][3][1] - lhs[i][AA][2][0]*lhs[i-1][CC][3][2] - lhs[i][AA][3][0]*lhs[i-1][CC][3][3] - lhs[i][AA][4][0]*lhs[i-1][CC][3][4]; lhs[i][BB][3][1] = lhs[i][BB][3][1] - lhs[i][AA][0][1]*lhs[i-1][CC][3][0] - lhs[i][AA][1][1]*lhs[i-1][CC][3][1] - lhs[i][AA][2][1]*lhs[i-1][CC][3][2] - lhs[i][AA][3][1]*lhs[i-1][CC][3][3] - lhs[i][AA][4][1]*lhs[i-1][CC][3][4]; lhs[i][BB][3][2] = lhs[i][BB][3][2] - lhs[i][AA][0][2]*lhs[i-1][CC][3][0] - lhs[i][AA][1][2]*lhs[i-1][CC][3][1] - lhs[i][AA][2][2]*lhs[i-1][CC][3][2] - lhs[i][AA][3][2]*lhs[i-1][CC][3][3] - lhs[i][AA][4][2]*lhs[i-1][CC][3][4]; lhs[i][BB][3][3] = lhs[i][BB][3][3] - lhs[i][AA][0][3]*lhs[i-1][CC][3][0] - lhs[i][AA][1][3]*lhs[i-1][CC][3][1] - lhs[i][AA][2][3]*lhs[i-1][CC][3][2] - lhs[i][AA][3][3]*lhs[i-1][CC][3][3] - lhs[i][AA][4][3]*lhs[i-1][CC][3][4]; lhs[i][BB][3][4] = lhs[i][BB][3][4] - lhs[i][AA][0][4]*lhs[i-1][CC][3][0] - lhs[i][AA][1][4]*lhs[i-1][CC][3][1] - lhs[i][AA][2][4]*lhs[i-1][CC][3][2] - lhs[i][AA][3][4]*lhs[i-1][CC][3][3] - lhs[i][AA][4][4]*lhs[i-1][CC][3][4]; lhs[i][BB][4][0] = lhs[i][BB][4][0] - lhs[i][AA][0][0]*lhs[i-1][CC][4][0] - lhs[i][AA][1][0]*lhs[i-1][CC][4][1] - lhs[i][AA][2][0]*lhs[i-1][CC][4][2] - lhs[i][AA][3][0]*lhs[i-1][CC][4][3] - lhs[i][AA][4][0]*lhs[i-1][CC][4][4]; lhs[i][BB][4][1] = 
lhs[i][BB][4][1] - lhs[i][AA][0][1]*lhs[i-1][CC][4][0] - lhs[i][AA][1][1]*lhs[i-1][CC][4][1] - lhs[i][AA][2][1]*lhs[i-1][CC][4][2] - lhs[i][AA][3][1]*lhs[i-1][CC][4][3] - lhs[i][AA][4][1]*lhs[i-1][CC][4][4]; lhs[i][BB][4][2] = lhs[i][BB][4][2] - lhs[i][AA][0][2]*lhs[i-1][CC][4][0] - lhs[i][AA][1][2]*lhs[i-1][CC][4][1] - lhs[i][AA][2][2]*lhs[i-1][CC][4][2] - lhs[i][AA][3][2]*lhs[i-1][CC][4][3] - lhs[i][AA][4][2]*lhs[i-1][CC][4][4]; lhs[i][BB][4][3] = lhs[i][BB][4][3] - lhs[i][AA][0][3]*lhs[i-1][CC][4][0] - lhs[i][AA][1][3]*lhs[i-1][CC][4][1] - lhs[i][AA][2][3]*lhs[i-1][CC][4][2] - lhs[i][AA][3][3]*lhs[i-1][CC][4][3] - lhs[i][AA][4][3]*lhs[i-1][CC][4][4]; lhs[i][BB][4][4] = lhs[i][BB][4][4] - lhs[i][AA][0][4]*lhs[i-1][CC][4][0] - lhs[i][AA][1][4]*lhs[i-1][CC][4][1] - lhs[i][AA][2][4]*lhs[i-1][CC][4][2] - lhs[i][AA][3][4]*lhs[i-1][CC][4][3] - lhs[i][AA][4][4]*lhs[i-1][CC][4][4]; } //------------------------------------------------------------------- // multiply c[k][j][i] by b_inverse and copy back to c // multiply rhs[k][j][0] by b_inverse[k][j][0] and copy to rhs // // binvcrhs( lhs[i][BB], lhs[i][CC], rhs[k][j][i] ); // void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) //------------------------------------------------------------------- { pivot2 = 1.00/lhs[i][BB][0][0]; lhs[i][BB][1][0] = lhs[i][BB][1][0]*pivot2; lhs[i][BB][2][0] = lhs[i][BB][2][0]*pivot2; lhs[i][BB][3][0] = lhs[i][BB][3][0]*pivot2; lhs[i][BB][4][0] = lhs[i][BB][4][0]*pivot2; lhs[i][CC][0][0] = lhs[i][CC][0][0]*pivot2; lhs[i][CC][1][0] = lhs[i][CC][1][0]*pivot2; lhs[i][CC][2][0] = lhs[i][CC][2][0]*pivot2; lhs[i][CC][3][0] = lhs[i][CC][3][0]*pivot2; lhs[i][CC][4][0] = lhs[i][CC][4][0]*pivot2; rhs[k][j][i][0] = rhs[k][j][i][0] *pivot2; coeff2 = lhs[i][BB][0][1]; lhs[i][BB][1][1]= lhs[i][BB][1][1] - coeff2*lhs[i][BB][1][0]; lhs[i][BB][2][1]= lhs[i][BB][2][1] - coeff2*lhs[i][BB][2][0]; lhs[i][BB][3][1]= lhs[i][BB][3][1] - coeff2*lhs[i][BB][3][0]; lhs[i][BB][4][1]= lhs[i][BB][4][1] - 
coeff2*lhs[i][BB][4][0]; lhs[i][CC][0][1] = lhs[i][CC][0][1] - coeff2*lhs[i][CC][0][0]; lhs[i][CC][1][1] = lhs[i][CC][1][1] - coeff2*lhs[i][CC][1][0]; lhs[i][CC][2][1] = lhs[i][CC][2][1] - coeff2*lhs[i][CC][2][0]; lhs[i][CC][3][1] = lhs[i][CC][3][1] - coeff2*lhs[i][CC][3][0]; lhs[i][CC][4][1] = lhs[i][CC][4][1] - coeff2*lhs[i][CC][4][0]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff2*rhs[k][j][i][0]; coeff2 = lhs[i][BB][0][2]; lhs[i][BB][1][2]= lhs[i][BB][1][2] - coeff2*lhs[i][BB][1][0]; lhs[i][BB][2][2]= lhs[i][BB][2][2] - coeff2*lhs[i][BB][2][0]; lhs[i][BB][3][2]= lhs[i][BB][3][2] - coeff2*lhs[i][BB][3][0]; lhs[i][BB][4][2]= lhs[i][BB][4][2] - coeff2*lhs[i][BB][4][0]; lhs[i][CC][0][2] = lhs[i][CC][0][2] - coeff2*lhs[i][CC][0][0]; lhs[i][CC][1][2] = lhs[i][CC][1][2] - coeff2*lhs[i][CC][1][0]; lhs[i][CC][2][2] = lhs[i][CC][2][2] - coeff2*lhs[i][CC][2][0]; lhs[i][CC][3][2] = lhs[i][CC][3][2] - coeff2*lhs[i][CC][3][0]; lhs[i][CC][4][2] = lhs[i][CC][4][2] - coeff2*lhs[i][CC][4][0]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff2*rhs[k][j][i][0]; coeff2 = lhs[i][BB][0][3]; lhs[i][BB][1][3]= lhs[i][BB][1][3] - coeff2*lhs[i][BB][1][0]; lhs[i][BB][2][3]= lhs[i][BB][2][3] - coeff2*lhs[i][BB][2][0]; lhs[i][BB][3][3]= lhs[i][BB][3][3] - coeff2*lhs[i][BB][3][0]; lhs[i][BB][4][3]= lhs[i][BB][4][3] - coeff2*lhs[i][BB][4][0]; lhs[i][CC][0][3] = lhs[i][CC][0][3] - coeff2*lhs[i][CC][0][0]; lhs[i][CC][1][3] = lhs[i][CC][1][3] - coeff2*lhs[i][CC][1][0]; lhs[i][CC][2][3] = lhs[i][CC][2][3] - coeff2*lhs[i][CC][2][0]; lhs[i][CC][3][3] = lhs[i][CC][3][3] - coeff2*lhs[i][CC][3][0]; lhs[i][CC][4][3] = lhs[i][CC][4][3] - coeff2*lhs[i][CC][4][0]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff2*rhs[k][j][i][0]; coeff2 = lhs[i][BB][0][4]; lhs[i][BB][1][4]= lhs[i][BB][1][4] - coeff2*lhs[i][BB][1][0]; lhs[i][BB][2][4]= lhs[i][BB][2][4] - coeff2*lhs[i][BB][2][0]; lhs[i][BB][3][4]= lhs[i][BB][3][4] - coeff2*lhs[i][BB][3][0]; lhs[i][BB][4][4]= lhs[i][BB][4][4] - coeff2*lhs[i][BB][4][0]; lhs[i][CC][0][4] = 
lhs[i][CC][0][4] - coeff2*lhs[i][CC][0][0]; lhs[i][CC][1][4] = lhs[i][CC][1][4] - coeff2*lhs[i][CC][1][0]; lhs[i][CC][2][4] = lhs[i][CC][2][4] - coeff2*lhs[i][CC][2][0]; lhs[i][CC][3][4] = lhs[i][CC][3][4] - coeff2*lhs[i][CC][3][0]; lhs[i][CC][4][4] = lhs[i][CC][4][4] - coeff2*lhs[i][CC][4][0]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff2*rhs[k][j][i][0]; pivot2 = 1.00/lhs[i][BB][1][1]; lhs[i][BB][2][1] = lhs[i][BB][2][1]*pivot2; lhs[i][BB][3][1] = lhs[i][BB][3][1]*pivot2; lhs[i][BB][4][1] = lhs[i][BB][4][1]*pivot2; lhs[i][CC][0][1] = lhs[i][CC][0][1]*pivot2; lhs[i][CC][1][1] = lhs[i][CC][1][1]*pivot2; lhs[i][CC][2][1] = lhs[i][CC][2][1]*pivot2; lhs[i][CC][3][1] = lhs[i][CC][3][1]*pivot2; lhs[i][CC][4][1] = lhs[i][CC][4][1]*pivot2; rhs[k][j][i][1] = rhs[k][j][i][1] *pivot2; coeff2 = lhs[i][BB][1][0]; lhs[i][BB][2][0]= lhs[i][BB][2][0] - coeff2*lhs[i][BB][2][1]; lhs[i][BB][3][0]= lhs[i][BB][3][0] - coeff2*lhs[i][BB][3][1]; lhs[i][BB][4][0]= lhs[i][BB][4][0] - coeff2*lhs[i][BB][4][1]; lhs[i][CC][0][0] = lhs[i][CC][0][0] - coeff2*lhs[i][CC][0][1]; lhs[i][CC][1][0] = lhs[i][CC][1][0] - coeff2*lhs[i][CC][1][1]; lhs[i][CC][2][0] = lhs[i][CC][2][0] - coeff2*lhs[i][CC][2][1]; lhs[i][CC][3][0] = lhs[i][CC][3][0] - coeff2*lhs[i][CC][3][1]; lhs[i][CC][4][0] = lhs[i][CC][4][0] - coeff2*lhs[i][CC][4][1]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff2*rhs[k][j][i][1]; coeff2 = lhs[i][BB][1][2]; lhs[i][BB][2][2]= lhs[i][BB][2][2] - coeff2*lhs[i][BB][2][1]; lhs[i][BB][3][2]= lhs[i][BB][3][2] - coeff2*lhs[i][BB][3][1]; lhs[i][BB][4][2]= lhs[i][BB][4][2] - coeff2*lhs[i][BB][4][1]; lhs[i][CC][0][2] = lhs[i][CC][0][2] - coeff2*lhs[i][CC][0][1]; lhs[i][CC][1][2] = lhs[i][CC][1][2] - coeff2*lhs[i][CC][1][1]; lhs[i][CC][2][2] = lhs[i][CC][2][2] - coeff2*lhs[i][CC][2][1]; lhs[i][CC][3][2] = lhs[i][CC][3][2] - coeff2*lhs[i][CC][3][1]; lhs[i][CC][4][2] = lhs[i][CC][4][2] - coeff2*lhs[i][CC][4][1]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff2*rhs[k][j][i][1]; coeff2 = lhs[i][BB][1][3]; 
lhs[i][BB][2][3]= lhs[i][BB][2][3] - coeff2*lhs[i][BB][2][1]; lhs[i][BB][3][3]= lhs[i][BB][3][3] - coeff2*lhs[i][BB][3][1]; lhs[i][BB][4][3]= lhs[i][BB][4][3] - coeff2*lhs[i][BB][4][1]; lhs[i][CC][0][3] = lhs[i][CC][0][3] - coeff2*lhs[i][CC][0][1]; lhs[i][CC][1][3] = lhs[i][CC][1][3] - coeff2*lhs[i][CC][1][1]; lhs[i][CC][2][3] = lhs[i][CC][2][3] - coeff2*lhs[i][CC][2][1]; lhs[i][CC][3][3] = lhs[i][CC][3][3] - coeff2*lhs[i][CC][3][1]; lhs[i][CC][4][3] = lhs[i][CC][4][3] - coeff2*lhs[i][CC][4][1]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff2*rhs[k][j][i][1]; coeff2 = lhs[i][BB][1][4]; lhs[i][BB][2][4]= lhs[i][BB][2][4] - coeff2*lhs[i][BB][2][1]; lhs[i][BB][3][4]= lhs[i][BB][3][4] - coeff2*lhs[i][BB][3][1]; lhs[i][BB][4][4]= lhs[i][BB][4][4] - coeff2*lhs[i][BB][4][1]; lhs[i][CC][0][4] = lhs[i][CC][0][4] - coeff2*lhs[i][CC][0][1]; lhs[i][CC][1][4] = lhs[i][CC][1][4] - coeff2*lhs[i][CC][1][1]; lhs[i][CC][2][4] = lhs[i][CC][2][4] - coeff2*lhs[i][CC][2][1]; lhs[i][CC][3][4] = lhs[i][CC][3][4] - coeff2*lhs[i][CC][3][1]; lhs[i][CC][4][4] = lhs[i][CC][4][4] - coeff2*lhs[i][CC][4][1]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff2*rhs[k][j][i][1]; pivot2 = 1.00/lhs[i][BB][2][2]; lhs[i][BB][3][2] = lhs[i][BB][3][2]*pivot2; lhs[i][BB][4][2] = lhs[i][BB][4][2]*pivot2; lhs[i][CC][0][2] = lhs[i][CC][0][2]*pivot2; lhs[i][CC][1][2] = lhs[i][CC][1][2]*pivot2; lhs[i][CC][2][2] = lhs[i][CC][2][2]*pivot2; lhs[i][CC][3][2] = lhs[i][CC][3][2]*pivot2; lhs[i][CC][4][2] = lhs[i][CC][4][2]*pivot2; rhs[k][j][i][2] = rhs[k][j][i][2] *pivot2; coeff2 = lhs[i][BB][2][0]; lhs[i][BB][3][0]= lhs[i][BB][3][0] - coeff2*lhs[i][BB][3][2]; lhs[i][BB][4][0]= lhs[i][BB][4][0] - coeff2*lhs[i][BB][4][2]; lhs[i][CC][0][0] = lhs[i][CC][0][0] - coeff2*lhs[i][CC][0][2]; lhs[i][CC][1][0] = lhs[i][CC][1][0] - coeff2*lhs[i][CC][1][2]; lhs[i][CC][2][0] = lhs[i][CC][2][0] - coeff2*lhs[i][CC][2][2]; lhs[i][CC][3][0] = lhs[i][CC][3][0] - coeff2*lhs[i][CC][3][2]; lhs[i][CC][4][0] = lhs[i][CC][4][0] - coeff2*lhs[i][CC][4][2]; 
rhs[k][j][i][0] = rhs[k][j][i][0] - coeff2*rhs[k][j][i][2]; coeff2 = lhs[i][BB][2][1]; lhs[i][BB][3][1]= lhs[i][BB][3][1] - coeff2*lhs[i][BB][3][2]; lhs[i][BB][4][1]= lhs[i][BB][4][1] - coeff2*lhs[i][BB][4][2]; lhs[i][CC][0][1] = lhs[i][CC][0][1] - coeff2*lhs[i][CC][0][2]; lhs[i][CC][1][1] = lhs[i][CC][1][1] - coeff2*lhs[i][CC][1][2]; lhs[i][CC][2][1] = lhs[i][CC][2][1] - coeff2*lhs[i][CC][2][2]; lhs[i][CC][3][1] = lhs[i][CC][3][1] - coeff2*lhs[i][CC][3][2]; lhs[i][CC][4][1] = lhs[i][CC][4][1] - coeff2*lhs[i][CC][4][2]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff2*rhs[k][j][i][2]; coeff2 = lhs[i][BB][2][3]; lhs[i][BB][3][3]= lhs[i][BB][3][3] - coeff2*lhs[i][BB][3][2]; lhs[i][BB][4][3]= lhs[i][BB][4][3] - coeff2*lhs[i][BB][4][2]; lhs[i][CC][0][3] = lhs[i][CC][0][3] - coeff2*lhs[i][CC][0][2]; lhs[i][CC][1][3] = lhs[i][CC][1][3] - coeff2*lhs[i][CC][1][2]; lhs[i][CC][2][3] = lhs[i][CC][2][3] - coeff2*lhs[i][CC][2][2]; lhs[i][CC][3][3] = lhs[i][CC][3][3] - coeff2*lhs[i][CC][3][2]; lhs[i][CC][4][3] = lhs[i][CC][4][3] - coeff2*lhs[i][CC][4][2]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff2*rhs[k][j][i][2]; coeff2 = lhs[i][BB][2][4]; lhs[i][BB][3][4]= lhs[i][BB][3][4] - coeff2*lhs[i][BB][3][2]; lhs[i][BB][4][4]= lhs[i][BB][4][4] - coeff2*lhs[i][BB][4][2]; lhs[i][CC][0][4] = lhs[i][CC][0][4] - coeff2*lhs[i][CC][0][2]; lhs[i][CC][1][4] = lhs[i][CC][1][4] - coeff2*lhs[i][CC][1][2]; lhs[i][CC][2][4] = lhs[i][CC][2][4] - coeff2*lhs[i][CC][2][2]; lhs[i][CC][3][4] = lhs[i][CC][3][4] - coeff2*lhs[i][CC][3][2]; lhs[i][CC][4][4] = lhs[i][CC][4][4] - coeff2*lhs[i][CC][4][2]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff2*rhs[k][j][i][2]; pivot2 = 1.00/lhs[i][BB][3][3]; lhs[i][BB][4][3] = lhs[i][BB][4][3]*pivot2; lhs[i][CC][0][3] = lhs[i][CC][0][3]*pivot2; lhs[i][CC][1][3] = lhs[i][CC][1][3]*pivot2; lhs[i][CC][2][3] = lhs[i][CC][2][3]*pivot2; lhs[i][CC][3][3] = lhs[i][CC][3][3]*pivot2; lhs[i][CC][4][3] = lhs[i][CC][4][3]*pivot2; rhs[k][j][i][3] = rhs[k][j][i][3] *pivot2; coeff2 = 
lhs[i][BB][3][0]; lhs[i][BB][4][0]= lhs[i][BB][4][0] - coeff2*lhs[i][BB][4][3]; lhs[i][CC][0][0] = lhs[i][CC][0][0] - coeff2*lhs[i][CC][0][3]; lhs[i][CC][1][0] = lhs[i][CC][1][0] - coeff2*lhs[i][CC][1][3]; lhs[i][CC][2][0] = lhs[i][CC][2][0] - coeff2*lhs[i][CC][2][3]; lhs[i][CC][3][0] = lhs[i][CC][3][0] - coeff2*lhs[i][CC][3][3]; lhs[i][CC][4][0] = lhs[i][CC][4][0] - coeff2*lhs[i][CC][4][3]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff2*rhs[k][j][i][3]; coeff2 = lhs[i][BB][3][1]; lhs[i][BB][4][1]= lhs[i][BB][4][1] - coeff2*lhs[i][BB][4][3]; lhs[i][CC][0][1] = lhs[i][CC][0][1] - coeff2*lhs[i][CC][0][3]; lhs[i][CC][1][1] = lhs[i][CC][1][1] - coeff2*lhs[i][CC][1][3]; lhs[i][CC][2][1] = lhs[i][CC][2][1] - coeff2*lhs[i][CC][2][3]; lhs[i][CC][3][1] = lhs[i][CC][3][1] - coeff2*lhs[i][CC][3][3]; lhs[i][CC][4][1] = lhs[i][CC][4][1] - coeff2*lhs[i][CC][4][3]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff2*rhs[k][j][i][3]; coeff2 = lhs[i][BB][3][2]; lhs[i][BB][4][2]= lhs[i][BB][4][2] - coeff2*lhs[i][BB][4][3]; lhs[i][CC][0][2] = lhs[i][CC][0][2] - coeff2*lhs[i][CC][0][3]; lhs[i][CC][1][2] = lhs[i][CC][1][2] - coeff2*lhs[i][CC][1][3]; lhs[i][CC][2][2] = lhs[i][CC][2][2] - coeff2*lhs[i][CC][2][3]; lhs[i][CC][3][2] = lhs[i][CC][3][2] - coeff2*lhs[i][CC][3][3]; lhs[i][CC][4][2] = lhs[i][CC][4][2] - coeff2*lhs[i][CC][4][3]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff2*rhs[k][j][i][3]; coeff2 = lhs[i][BB][3][4]; lhs[i][BB][4][4]= lhs[i][BB][4][4] - coeff2*lhs[i][BB][4][3]; lhs[i][CC][0][4] = lhs[i][CC][0][4] - coeff2*lhs[i][CC][0][3]; lhs[i][CC][1][4] = lhs[i][CC][1][4] - coeff2*lhs[i][CC][1][3]; lhs[i][CC][2][4] = lhs[i][CC][2][4] - coeff2*lhs[i][CC][2][3]; lhs[i][CC][3][4] = lhs[i][CC][3][4] - coeff2*lhs[i][CC][3][3]; lhs[i][CC][4][4] = lhs[i][CC][4][4] - coeff2*lhs[i][CC][4][3]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff2*rhs[k][j][i][3]; pivot2 = 1.00/lhs[i][BB][4][4]; lhs[i][CC][0][4] = lhs[i][CC][0][4]*pivot2; lhs[i][CC][1][4] = lhs[i][CC][1][4]*pivot2; lhs[i][CC][2][4] = 
lhs[i][CC][2][4]*pivot2; lhs[i][CC][3][4] = lhs[i][CC][3][4]*pivot2; lhs[i][CC][4][4] = lhs[i][CC][4][4]*pivot2; rhs[k][j][i][4] = rhs[k][j][i][4] *pivot2; coeff2 = lhs[i][BB][4][0]; lhs[i][CC][0][0] = lhs[i][CC][0][0] - coeff2*lhs[i][CC][0][4]; lhs[i][CC][1][0] = lhs[i][CC][1][0] - coeff2*lhs[i][CC][1][4]; lhs[i][CC][2][0] = lhs[i][CC][2][0] - coeff2*lhs[i][CC][2][4]; lhs[i][CC][3][0] = lhs[i][CC][3][0] - coeff2*lhs[i][CC][3][4]; lhs[i][CC][4][0] = lhs[i][CC][4][0] - coeff2*lhs[i][CC][4][4]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff2*rhs[k][j][i][4]; coeff2 = lhs[i][BB][4][1]; lhs[i][CC][0][1] = lhs[i][CC][0][1] - coeff2*lhs[i][CC][0][4]; lhs[i][CC][1][1] = lhs[i][CC][1][1] - coeff2*lhs[i][CC][1][4]; lhs[i][CC][2][1] = lhs[i][CC][2][1] - coeff2*lhs[i][CC][2][4]; lhs[i][CC][3][1] = lhs[i][CC][3][1] - coeff2*lhs[i][CC][3][4]; lhs[i][CC][4][1] = lhs[i][CC][4][1] - coeff2*lhs[i][CC][4][4]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff2*rhs[k][j][i][4]; coeff2 = lhs[i][BB][4][2]; lhs[i][CC][0][2] = lhs[i][CC][0][2] - coeff2*lhs[i][CC][0][4]; lhs[i][CC][1][2] = lhs[i][CC][1][2] - coeff2*lhs[i][CC][1][4]; lhs[i][CC][2][2] = lhs[i][CC][2][2] - coeff2*lhs[i][CC][2][4]; lhs[i][CC][3][2] = lhs[i][CC][3][2] - coeff2*lhs[i][CC][3][4]; lhs[i][CC][4][2] = lhs[i][CC][4][2] - coeff2*lhs[i][CC][4][4]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff2*rhs[k][j][i][4]; coeff2 = lhs[i][BB][4][3]; lhs[i][CC][0][3] = lhs[i][CC][0][3] - coeff2*lhs[i][CC][0][4]; lhs[i][CC][1][3] = lhs[i][CC][1][3] - coeff2*lhs[i][CC][1][4]; lhs[i][CC][2][3] = lhs[i][CC][2][3] - coeff2*lhs[i][CC][2][4]; lhs[i][CC][3][3] = lhs[i][CC][3][3] - coeff2*lhs[i][CC][3][4]; lhs[i][CC][4][3] = lhs[i][CC][4][3] - coeff2*lhs[i][CC][4][4]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff2*rhs[k][j][i][4]; } //END of binvcrhs( lhs[i][BB], lhs[i][CC], rhs[k][j][i] ); } //END of for //--------------------------------------------------------------------- // rhs(isize) = rhs(isize) - A*rhs(isize-1) // // matvec_sub(lhs[isize][AA], 
rhs[k][j][isize-1], rhs[k][j][isize]); // void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) //--------------------------------------------------------------------- { rhs[k][j][isize][0] = rhs[k][j][isize][0] - lhs[isize][AA][0][0]*rhs[k][j][isize-1][0] - lhs[isize][AA][1][0]*rhs[k][j][isize-1][1] - lhs[isize][AA][2][0]*rhs[k][j][isize-1][2] - lhs[isize][AA][3][0]*rhs[k][j][isize-1][3] - lhs[isize][AA][4][0]*rhs[k][j][isize-1][4]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - lhs[isize][AA][0][1]*rhs[k][j][isize-1][0] - lhs[isize][AA][1][1]*rhs[k][j][isize-1][1] - lhs[isize][AA][2][1]*rhs[k][j][isize-1][2] - lhs[isize][AA][3][1]*rhs[k][j][isize-1][3] - lhs[isize][AA][4][1]*rhs[k][j][isize-1][4]; rhs[k][j][isize][2] = rhs[k][j][isize][2] - lhs[isize][AA][0][2]*rhs[k][j][isize-1][0] - lhs[isize][AA][1][2]*rhs[k][j][isize-1][1] - lhs[isize][AA][2][2]*rhs[k][j][isize-1][2] - lhs[isize][AA][3][2]*rhs[k][j][isize-1][3] - lhs[isize][AA][4][2]*rhs[k][j][isize-1][4]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - lhs[isize][AA][0][3]*rhs[k][j][isize-1][0] - lhs[isize][AA][1][3]*rhs[k][j][isize-1][1] - lhs[isize][AA][2][3]*rhs[k][j][isize-1][2] - lhs[isize][AA][3][3]*rhs[k][j][isize-1][3] - lhs[isize][AA][4][3]*rhs[k][j][isize-1][4]; rhs[k][j][isize][4] = rhs[k][j][isize][4] - lhs[isize][AA][0][4]*rhs[k][j][isize-1][0] - lhs[isize][AA][1][4]*rhs[k][j][isize-1][1] - lhs[isize][AA][2][4]*rhs[k][j][isize-1][2] - lhs[isize][AA][3][4]*rhs[k][j][isize-1][3] - lhs[isize][AA][4][4]*rhs[k][j][isize-1][4]; } //--------------------------------------------------------------------- // B(isize) = B(isize) - C(isize-1)*A(isize) // // matmul_sub(lhs[isize][AA], lhs[isize-1][CC], lhs[isize][BB]); // void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]) //--------------------------------------------------------------------- { lhs[isize][BB][0][0] = lhs[isize][BB][0][0] - lhs[isize][AA][0][0]*lhs[isize-1][CC][0][0] - 
lhs[isize][AA][1][0]*lhs[isize-1][CC][0][1] - lhs[isize][AA][2][0]*lhs[isize-1][CC][0][2] - lhs[isize][AA][3][0]*lhs[isize-1][CC][0][3] - lhs[isize][AA][4][0]*lhs[isize-1][CC][0][4]; lhs[isize][BB][0][1] = lhs[isize][BB][0][1] - lhs[isize][AA][0][1]*lhs[isize-1][CC][0][0] - lhs[isize][AA][1][1]*lhs[isize-1][CC][0][1] - lhs[isize][AA][2][1]*lhs[isize-1][CC][0][2] - lhs[isize][AA][3][1]*lhs[isize-1][CC][0][3] - lhs[isize][AA][4][1]*lhs[isize-1][CC][0][4]; lhs[isize][BB][0][2] = lhs[isize][BB][0][2] - lhs[isize][AA][0][2]*lhs[isize-1][CC][0][0] - lhs[isize][AA][1][2]*lhs[isize-1][CC][0][1] - lhs[isize][AA][2][2]*lhs[isize-1][CC][0][2] - lhs[isize][AA][3][2]*lhs[isize-1][CC][0][3] - lhs[isize][AA][4][2]*lhs[isize-1][CC][0][4]; lhs[isize][BB][0][3] = lhs[isize][BB][0][3] - lhs[isize][AA][0][3]*lhs[isize-1][CC][0][0] - lhs[isize][AA][1][3]*lhs[isize-1][CC][0][1] - lhs[isize][AA][2][3]*lhs[isize-1][CC][0][2] - lhs[isize][AA][3][3]*lhs[isize-1][CC][0][3] - lhs[isize][AA][4][3]*lhs[isize-1][CC][0][4]; lhs[isize][BB][0][4] = lhs[isize][BB][0][4] - lhs[isize][AA][0][4]*lhs[isize-1][CC][0][0] - lhs[isize][AA][1][4]*lhs[isize-1][CC][0][1] - lhs[isize][AA][2][4]*lhs[isize-1][CC][0][2] - lhs[isize][AA][3][4]*lhs[isize-1][CC][0][3] - lhs[isize][AA][4][4]*lhs[isize-1][CC][0][4]; lhs[isize][BB][1][0] = lhs[isize][BB][1][0] - lhs[isize][AA][0][0]*lhs[isize-1][CC][1][0] - lhs[isize][AA][1][0]*lhs[isize-1][CC][1][1] - lhs[isize][AA][2][0]*lhs[isize-1][CC][1][2] - lhs[isize][AA][3][0]*lhs[isize-1][CC][1][3] - lhs[isize][AA][4][0]*lhs[isize-1][CC][1][4]; lhs[isize][BB][1][1] = lhs[isize][BB][1][1] - lhs[isize][AA][0][1]*lhs[isize-1][CC][1][0] - lhs[isize][AA][1][1]*lhs[isize-1][CC][1][1] - lhs[isize][AA][2][1]*lhs[isize-1][CC][1][2] - lhs[isize][AA][3][1]*lhs[isize-1][CC][1][3] - lhs[isize][AA][4][1]*lhs[isize-1][CC][1][4]; lhs[isize][BB][1][2] = lhs[isize][BB][1][2] - lhs[isize][AA][0][2]*lhs[isize-1][CC][1][0] - lhs[isize][AA][1][2]*lhs[isize-1][CC][1][1] - 
lhs[isize][AA][2][2]*lhs[isize-1][CC][1][2] - lhs[isize][AA][3][2]*lhs[isize-1][CC][1][3] - lhs[isize][AA][4][2]*lhs[isize-1][CC][1][4]; lhs[isize][BB][1][3] = lhs[isize][BB][1][3] - lhs[isize][AA][0][3]*lhs[isize-1][CC][1][0] - lhs[isize][AA][1][3]*lhs[isize-1][CC][1][1] - lhs[isize][AA][2][3]*lhs[isize-1][CC][1][2] - lhs[isize][AA][3][3]*lhs[isize-1][CC][1][3] - lhs[isize][AA][4][3]*lhs[isize-1][CC][1][4]; lhs[isize][BB][1][4] = lhs[isize][BB][1][4] - lhs[isize][AA][0][4]*lhs[isize-1][CC][1][0] - lhs[isize][AA][1][4]*lhs[isize-1][CC][1][1] - lhs[isize][AA][2][4]*lhs[isize-1][CC][1][2] - lhs[isize][AA][3][4]*lhs[isize-1][CC][1][3] - lhs[isize][AA][4][4]*lhs[isize-1][CC][1][4]; lhs[isize][BB][2][0] = lhs[isize][BB][2][0] - lhs[isize][AA][0][0]*lhs[isize-1][CC][2][0] - lhs[isize][AA][1][0]*lhs[isize-1][CC][2][1] - lhs[isize][AA][2][0]*lhs[isize-1][CC][2][2] - lhs[isize][AA][3][0]*lhs[isize-1][CC][2][3] - lhs[isize][AA][4][0]*lhs[isize-1][CC][2][4]; lhs[isize][BB][2][1] = lhs[isize][BB][2][1] - lhs[isize][AA][0][1]*lhs[isize-1][CC][2][0] - lhs[isize][AA][1][1]*lhs[isize-1][CC][2][1] - lhs[isize][AA][2][1]*lhs[isize-1][CC][2][2] - lhs[isize][AA][3][1]*lhs[isize-1][CC][2][3] - lhs[isize][AA][4][1]*lhs[isize-1][CC][2][4]; lhs[isize][BB][2][2] = lhs[isize][BB][2][2] - lhs[isize][AA][0][2]*lhs[isize-1][CC][2][0] - lhs[isize][AA][1][2]*lhs[isize-1][CC][2][1] - lhs[isize][AA][2][2]*lhs[isize-1][CC][2][2] - lhs[isize][AA][3][2]*lhs[isize-1][CC][2][3] - lhs[isize][AA][4][2]*lhs[isize-1][CC][2][4]; lhs[isize][BB][2][3] = lhs[isize][BB][2][3] - lhs[isize][AA][0][3]*lhs[isize-1][CC][2][0] - lhs[isize][AA][1][3]*lhs[isize-1][CC][2][1] - lhs[isize][AA][2][3]*lhs[isize-1][CC][2][2] - lhs[isize][AA][3][3]*lhs[isize-1][CC][2][3] - lhs[isize][AA][4][3]*lhs[isize-1][CC][2][4]; lhs[isize][BB][2][4] = lhs[isize][BB][2][4] - lhs[isize][AA][0][4]*lhs[isize-1][CC][2][0] - lhs[isize][AA][1][4]*lhs[isize-1][CC][2][1] - lhs[isize][AA][2][4]*lhs[isize-1][CC][2][2] - 
lhs[isize][AA][3][4]*lhs[isize-1][CC][2][3] - lhs[isize][AA][4][4]*lhs[isize-1][CC][2][4]; lhs[isize][BB][3][0] = lhs[isize][BB][3][0] - lhs[isize][AA][0][0]*lhs[isize-1][CC][3][0] - lhs[isize][AA][1][0]*lhs[isize-1][CC][3][1] - lhs[isize][AA][2][0]*lhs[isize-1][CC][3][2] - lhs[isize][AA][3][0]*lhs[isize-1][CC][3][3] - lhs[isize][AA][4][0]*lhs[isize-1][CC][3][4]; lhs[isize][BB][3][1] = lhs[isize][BB][3][1] - lhs[isize][AA][0][1]*lhs[isize-1][CC][3][0] - lhs[isize][AA][1][1]*lhs[isize-1][CC][3][1] - lhs[isize][AA][2][1]*lhs[isize-1][CC][3][2] - lhs[isize][AA][3][1]*lhs[isize-1][CC][3][3] - lhs[isize][AA][4][1]*lhs[isize-1][CC][3][4]; lhs[isize][BB][3][2] = lhs[isize][BB][3][2] - lhs[isize][AA][0][2]*lhs[isize-1][CC][3][0] - lhs[isize][AA][1][2]*lhs[isize-1][CC][3][1] - lhs[isize][AA][2][2]*lhs[isize-1][CC][3][2] - lhs[isize][AA][3][2]*lhs[isize-1][CC][3][3] - lhs[isize][AA][4][2]*lhs[isize-1][CC][3][4]; lhs[isize][BB][3][3] = lhs[isize][BB][3][3] - lhs[isize][AA][0][3]*lhs[isize-1][CC][3][0] - lhs[isize][AA][1][3]*lhs[isize-1][CC][3][1] - lhs[isize][AA][2][3]*lhs[isize-1][CC][3][2] - lhs[isize][AA][3][3]*lhs[isize-1][CC][3][3] - lhs[isize][AA][4][3]*lhs[isize-1][CC][3][4]; lhs[isize][BB][3][4] = lhs[isize][BB][3][4] - lhs[isize][AA][0][4]*lhs[isize-1][CC][3][0] - lhs[isize][AA][1][4]*lhs[isize-1][CC][3][1] - lhs[isize][AA][2][4]*lhs[isize-1][CC][3][2] - lhs[isize][AA][3][4]*lhs[isize-1][CC][3][3] - lhs[isize][AA][4][4]*lhs[isize-1][CC][3][4]; lhs[isize][BB][4][0] = lhs[isize][BB][4][0] - lhs[isize][AA][0][0]*lhs[isize-1][CC][4][0] - lhs[isize][AA][1][0]*lhs[isize-1][CC][4][1] - lhs[isize][AA][2][0]*lhs[isize-1][CC][4][2] - lhs[isize][AA][3][0]*lhs[isize-1][CC][4][3] - lhs[isize][AA][4][0]*lhs[isize-1][CC][4][4]; lhs[isize][BB][4][1] = lhs[isize][BB][4][1] - lhs[isize][AA][0][1]*lhs[isize-1][CC][4][0] - lhs[isize][AA][1][1]*lhs[isize-1][CC][4][1] - lhs[isize][AA][2][1]*lhs[isize-1][CC][4][2] - lhs[isize][AA][3][1]*lhs[isize-1][CC][4][3] - 
lhs[isize][AA][4][1]*lhs[isize-1][CC][4][4]; lhs[isize][BB][4][2] = lhs[isize][BB][4][2] - lhs[isize][AA][0][2]*lhs[isize-1][CC][4][0] - lhs[isize][AA][1][2]*lhs[isize-1][CC][4][1] - lhs[isize][AA][2][2]*lhs[isize-1][CC][4][2] - lhs[isize][AA][3][2]*lhs[isize-1][CC][4][3] - lhs[isize][AA][4][2]*lhs[isize-1][CC][4][4]; lhs[isize][BB][4][3] = lhs[isize][BB][4][3] - lhs[isize][AA][0][3]*lhs[isize-1][CC][4][0] - lhs[isize][AA][1][3]*lhs[isize-1][CC][4][1] - lhs[isize][AA][2][3]*lhs[isize-1][CC][4][2] - lhs[isize][AA][3][3]*lhs[isize-1][CC][4][3] - lhs[isize][AA][4][3]*lhs[isize-1][CC][4][4]; lhs[isize][BB][4][4] = lhs[isize][BB][4][4] - lhs[isize][AA][0][4]*lhs[isize-1][CC][4][0] - lhs[isize][AA][1][4]*lhs[isize-1][CC][4][1] - lhs[isize][AA][2][4]*lhs[isize-1][CC][4][2] - lhs[isize][AA][3][4]*lhs[isize-1][CC][4][3] - lhs[isize][AA][4][4]*lhs[isize-1][CC][4][4]; } // END of matmul_sub(lhs[isize][AA], lhs[isize-1][CC], lhs[isize][BB]); //--------------------------------------------------------------------- // multiply rhs() by b_inverse() and copy to rhs // // binvrhs( lhs[isize][BB], rhs[k][j][isize] ); // void binvrhs(double lhs[5][5], double r[5]) //--------------------------------------------------------------------- { pivot3 = 1.00/lhs[isize][BB][0][0]; lhs[isize][BB][1][0] = lhs[isize][BB][1][0]*pivot3; lhs[isize][BB][2][0] = lhs[isize][BB][2][0]*pivot3; lhs[isize][BB][3][0] = lhs[isize][BB][3][0]*pivot3; lhs[isize][BB][4][0] = lhs[isize][BB][4][0]*pivot3; rhs[k][j][isize][0] = rhs[k][j][isize][0] *pivot3; coeff3 = lhs[isize][BB][0][1]; lhs[isize][BB][1][1]= lhs[isize][BB][1][1] - coeff3*lhs[isize][BB][1][0]; lhs[isize][BB][2][1]= lhs[isize][BB][2][1] - coeff3*lhs[isize][BB][2][0]; lhs[isize][BB][3][1]= lhs[isize][BB][3][1] - coeff3*lhs[isize][BB][3][0]; lhs[isize][BB][4][1]= lhs[isize][BB][4][1] - coeff3*lhs[isize][BB][4][0]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff3*rhs[k][j][isize][0]; coeff3 = lhs[isize][BB][0][2]; lhs[isize][BB][1][2]= 
lhs[isize][BB][1][2] - coeff3*lhs[isize][BB][1][0]; lhs[isize][BB][2][2]= lhs[isize][BB][2][2] - coeff3*lhs[isize][BB][2][0]; lhs[isize][BB][3][2]= lhs[isize][BB][3][2] - coeff3*lhs[isize][BB][3][0]; lhs[isize][BB][4][2]= lhs[isize][BB][4][2] - coeff3*lhs[isize][BB][4][0]; rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff3*rhs[k][j][isize][0]; coeff3 = lhs[isize][BB][0][3]; lhs[isize][BB][1][3]= lhs[isize][BB][1][3] - coeff3*lhs[isize][BB][1][0]; lhs[isize][BB][2][3]= lhs[isize][BB][2][3] - coeff3*lhs[isize][BB][2][0]; lhs[isize][BB][3][3]= lhs[isize][BB][3][3] - coeff3*lhs[isize][BB][3][0]; lhs[isize][BB][4][3]= lhs[isize][BB][4][3] - coeff3*lhs[isize][BB][4][0]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff3*rhs[k][j][isize][0]; coeff3 = lhs[isize][BB][0][4]; lhs[isize][BB][1][4]= lhs[isize][BB][1][4] - coeff3*lhs[isize][BB][1][0]; lhs[isize][BB][2][4]= lhs[isize][BB][2][4] - coeff3*lhs[isize][BB][2][0]; lhs[isize][BB][3][4]= lhs[isize][BB][3][4] - coeff3*lhs[isize][BB][3][0]; lhs[isize][BB][4][4]= lhs[isize][BB][4][4] - coeff3*lhs[isize][BB][4][0]; rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff3*rhs[k][j][isize][0]; pivot3 = 1.00/lhs[isize][BB][1][1]; lhs[isize][BB][2][1] = lhs[isize][BB][2][1]*pivot3; lhs[isize][BB][3][1] = lhs[isize][BB][3][1]*pivot3; lhs[isize][BB][4][1] = lhs[isize][BB][4][1]*pivot3; rhs[k][j][isize][1] = rhs[k][j][isize][1] *pivot3; coeff3 = lhs[isize][BB][1][0]; lhs[isize][BB][2][0]= lhs[isize][BB][2][0] - coeff3*lhs[isize][BB][2][1]; lhs[isize][BB][3][0]= lhs[isize][BB][3][0] - coeff3*lhs[isize][BB][3][1]; lhs[isize][BB][4][0]= lhs[isize][BB][4][0] - coeff3*lhs[isize][BB][4][1]; rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff3*rhs[k][j][isize][1]; coeff3 = lhs[isize][BB][1][2]; lhs[isize][BB][2][2]= lhs[isize][BB][2][2] - coeff3*lhs[isize][BB][2][1]; lhs[isize][BB][3][2]= lhs[isize][BB][3][2] - coeff3*lhs[isize][BB][3][1]; lhs[isize][BB][4][2]= lhs[isize][BB][4][2] - coeff3*lhs[isize][BB][4][1]; rhs[k][j][isize][2] = 
rhs[k][j][isize][2] - coeff3*rhs[k][j][isize][1]; coeff3 = lhs[isize][BB][1][3]; lhs[isize][BB][2][3]= lhs[isize][BB][2][3] - coeff3*lhs[isize][BB][2][1]; lhs[isize][BB][3][3]= lhs[isize][BB][3][3] - coeff3*lhs[isize][BB][3][1]; lhs[isize][BB][4][3]= lhs[isize][BB][4][3] - coeff3*lhs[isize][BB][4][1]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff3*rhs[k][j][isize][1]; coeff3 = lhs[isize][BB][1][4]; lhs[isize][BB][2][4]= lhs[isize][BB][2][4] - coeff3*lhs[isize][BB][2][1]; lhs[isize][BB][3][4]= lhs[isize][BB][3][4] - coeff3*lhs[isize][BB][3][1]; lhs[isize][BB][4][4]= lhs[isize][BB][4][4] - coeff3*lhs[isize][BB][4][1]; rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff3*rhs[k][j][isize][1]; pivot3 = 1.00/lhs[isize][BB][2][2]; lhs[isize][BB][3][2] = lhs[isize][BB][3][2]*pivot3; lhs[isize][BB][4][2] = lhs[isize][BB][4][2]*pivot3; rhs[k][j][isize][2] = rhs[k][j][isize][2] *pivot3; coeff3 = lhs[isize][BB][2][0]; lhs[isize][BB][3][0]= lhs[isize][BB][3][0] - coeff3*lhs[isize][BB][3][2]; lhs[isize][BB][4][0]= lhs[isize][BB][4][0] - coeff3*lhs[isize][BB][4][2]; rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff3*rhs[k][j][isize][2]; coeff3 = lhs[isize][BB][2][1]; lhs[isize][BB][3][1]= lhs[isize][BB][3][1] - coeff3*lhs[isize][BB][3][2]; lhs[isize][BB][4][1]= lhs[isize][BB][4][1] - coeff3*lhs[isize][BB][4][2]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff3*rhs[k][j][isize][2]; coeff3 = lhs[isize][BB][2][3]; lhs[isize][BB][3][3]= lhs[isize][BB][3][3] - coeff3*lhs[isize][BB][3][2]; lhs[isize][BB][4][3]= lhs[isize][BB][4][3] - coeff3*lhs[isize][BB][4][2]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff3*rhs[k][j][isize][2]; coeff3 = lhs[isize][BB][2][4]; lhs[isize][BB][3][4]= lhs[isize][BB][3][4] - coeff3*lhs[isize][BB][3][2]; lhs[isize][BB][4][4]= lhs[isize][BB][4][4] - coeff3*lhs[isize][BB][4][2]; rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff3*rhs[k][j][isize][2]; pivot3 = 1.00/lhs[isize][BB][3][3]; lhs[isize][BB][4][3] = lhs[isize][BB][4][3]*pivot3; 
rhs[k][j][isize][3] = rhs[k][j][isize][3] *pivot3; coeff3 = lhs[isize][BB][3][0]; lhs[isize][BB][4][0]= lhs[isize][BB][4][0] - coeff3*lhs[isize][BB][4][3]; rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff3*rhs[k][j][isize][3]; coeff3 = lhs[isize][BB][3][1]; lhs[isize][BB][4][1]= lhs[isize][BB][4][1] - coeff3*lhs[isize][BB][4][3]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff3*rhs[k][j][isize][3]; coeff3 = lhs[isize][BB][3][2]; lhs[isize][BB][4][2]= lhs[isize][BB][4][2] - coeff3*lhs[isize][BB][4][3]; rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff3*rhs[k][j][isize][3]; coeff3 = lhs[isize][BB][3][4]; lhs[isize][BB][4][4]= lhs[isize][BB][4][4] - coeff3*lhs[isize][BB][4][3]; rhs[k][j][isize][4] = rhs[k][j][isize][4] - coeff3*rhs[k][j][isize][3]; pivot3 = 1.00/lhs[isize][BB][4][4]; rhs[k][j][isize][4] = rhs[k][j][isize][4] *pivot3; coeff3 = lhs[isize][BB][4][0]; rhs[k][j][isize][0] = rhs[k][j][isize][0] - coeff3*rhs[k][j][isize][4]; coeff3 = lhs[isize][BB][4][1]; rhs[k][j][isize][1] = rhs[k][j][isize][1] - coeff3*rhs[k][j][isize][4]; coeff3 = lhs[isize][BB][4][2]; rhs[k][j][isize][2] = rhs[k][j][isize][2] - coeff3*rhs[k][j][isize][4]; coeff3 = lhs[isize][BB][4][3]; rhs[k][j][isize][3] = rhs[k][j][isize][3] - coeff3*rhs[k][j][isize][4]; }//END of binvrhs( lhs[isize][BB], rhs[k][j][isize] ); //--------------------------------------------------------------------- // back solve: if last cell, then generate U(isize)=rhs(isize) // else assume U(isize) is loaded in un pack backsub_info // so just use it // after u(istart) will be sent to next cell //--------------------------------------------------------------------- for (i = isize-1; i >=0; i--) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[i][CC][n][m]*rhs[k][j][i+1][n]; } } } } } #pragma endscop }
bitcoin_fmt_plug.c
/* bitcoin-qt (bitcoin) wallet cracker patch for JtR. Hacked together during
 * April of 2013 by Dhiru Kholia <dhiru at openwall dot com>.
 *
 * Also works for Litecoin-Qt (litecoin) wallet files!
 *
 * This software is Copyright (c) 2013, Dhiru Kholia <dhiru at openwall dot com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * This cracks password protected bitcoin (bitcoin-qt) "wallet" files.
 *
 * bitcoin => https://github.com/bitcoin/bitcoin
 *
 * Thanks to Solar for asking to add support for bitcoin wallet files.
 *
 * Works fine with bitcoin-core-0.14.0 from March, 2017.
 */

/* Standard John-the-Ripper dynamic-plugin stanza: the same file is compiled
 * three ways (extern declaration, registration, full implementation). */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_bitcoin;
#elif FMT_REGISTERS_H
john_register_one(&fmt_bitcoin);
#else

#include <stdint.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
static int omp_t = 1; /* number of OpenMP threads, scaled by OMP_SCALE in init() */
#endif

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "sha2.h"
#include "aes.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "jumbo.h"
#include "memdbg.h"

#define FORMAT_LABEL            "Bitcoin"
#define FORMAT_NAME             "Bitcoin Core"
#define FORMAT_TAG              "$bitcoin$"
#define FORMAT_TAG_LEN          (sizeof(FORMAT_TAG)-1)

#ifdef SIMD_COEF_64
#define ALGORITHM_NAME          "SHA512 AES " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME          "SHA512 AES 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME          "SHA512 AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif

#if !defined (SHA512_DIGEST_LENGTH)
#define SHA512_DIGEST_LENGTH    64
#endif

#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE             0
#define BINARY_ALIGN            1
#define SALT_ALIGN              sizeof(int)
#define SALT_SIZE               sizeof(struct custom_salt)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT      (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT      (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

/* Maximum byte length of each decoded salt field (fields are validated in
 * valid() to be at most SZ*2 hex digits, i.e. SZ bytes after decoding). */
#define SZ                      128

static struct fmt_tests bitcoin_tests[] = {
	/* bitcoin wallet hashes */
	{"$bitcoin$96$169ce74743c260678fbbba92e926198702fd84e46ba555190f6f3d82f6852e4adeaa340d2ac065288e8605f13d1d7c86$16$26049c64dda292d5$177864$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "openwall"},
	{"$bitcoin$96$bd97a08e00e38910550e76848949285b9702fe64460f70d464feb2b63f83e1194c745e58fa4a0f09ac35e5777c507839$16$26049c64dda292d5$258507$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "password"},
	{"$bitcoin$96$4eca412eeb04971428efec70c9e18fb9375be0aa105e7eec55e528d0ba33a07eb6302add36da86736054dee9140ec9b8$16$26049c64dda292d5$265155$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "strongpassword"},
	/* litecoin wallet hash */
	{"$bitcoin$96$54401984b32448917b6d18b7a11debe91d62aaa343ab62ed98e1d3063f30817832c744360331df94cbf1dcececf6d00e$16$bfbc8ee2c07bbb4b$194787$96$07a206d5422640cfa65a8482298ad8e8598b94d99e2c4ce09c9d015b734632778cb46541b8c10284b9e14e5468b654b9$66$03fe6587bf580ee38b719f0b8689c80d300840bbc378707dce51e6f1fe20f49c20", "isyourpasswordstronger"},
	/* bitcoin-core-0.14.0 wallet */
	{"$bitcoin$96$8e7be42551c822c7e55a384e15b4fbfec69ceaed000925870dfb262d3381ed4405507f6c94defbae174a218eed0b5ce8$16$b469e6dbd76926cf$244139$96$ec03604094ada8a5d76bbdb455d260ac8b202ec475d5362d334314c4e7012a2f4b8f9cf8761c9862cd20892e138cd29e$66$03fdd0341a72d1a119ea1de51e477f0687a2bf601c07c032cc87ef82e0f8f49b19", "password@12345"},
	/* bitcoin-core-0.14.0 wallet */
	{"$bitcoin$96$2559c50151aeec013a9820c571fbee02e5892a3ead07607ee8de9d0ff55798cff6fe60dbd71d7873cb794a03e0d63b70$16$672204f8ab168ff6$136157$96$a437e8bd884c928603ee00cf85eaaf9245a071efa763db03ab485cb757f155976edc7294a6a731734f383850fcac4316$66$03ff84bb48f454662b91a6e588af8752da0674efa5dae82e7340152afcc38f4ba4", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"},
	{NULL}
};

/* Per-candidate plaintext buffer and crack-status bookkeeping. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int any_cracked, *cracked;  /* any_cracked: global flag; cracked[i]: per-index flag */
static size_t cracked_size;

/* Decoded salt record. All *_length fields are BYTE lengths (half the hex
 * string length: see the atoi(p)/2 conversions in get_salt()). */
static struct custom_salt {
	unsigned char cry_master[SZ];   /* encrypted master key blob */
	int cry_master_length;
	unsigned char cry_salt[SZ];     /* KDF salt */
	int cry_salt_length;
	int cry_rounds;                 /* SHA-512 iteration count */
	unsigned char ckey[SZ];
	int ckey_length;
	unsigned char public_key[SZ];
	int public_key_length;
} *cur_salt;

/* One-time format setup: scale key slots for OpenMP and allocate buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc_align(sizeof(*cracked),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Release buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

// #define BTC_DEBUG
#ifdef BTC_DEBUG
/* Debug helper: dump a buffer as lowercase hex. */
static void print_hex(unsigned char *str, int len)
{
	int i;
	for (i = 0; i < len; ++i)
		printf("%02x", str[i]);
	printf("\n");
}
#endif

/* Validate a "$bitcoin$..." ciphertext line. Each field is a decimal hex-string
 * length followed by the lowercase-hex payload of exactly that length, capped
 * at SZ*2 hex digits so the decoded bytes fit the fixed custom_salt arrays.
 * Returns 1 if the line is well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p = NULL;
	int res;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL) /* cry_master_length (of the hex string) */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_master */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and cry_master */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_salt_length (length of hex string) */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_salt */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and cry_salt */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_rounds */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* ckey_length (of hex) */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* ckey */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and ckey */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* public_key_length */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* public_key */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and public_key */
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Decode the validated ciphertext into a custom_salt. Lengths are converted
 * from hex-digit counts to byte counts (atoi/2); payloads are hex-decoded.
 * Returns a pointer to a static record (standard JtR get_salt contract). */
static void *get_salt(char *ciphertext)
{
	int i;
	char *p;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;
	p = strtokm(ctcopy, "$");
	cs.cry_master_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.cry_master_length; i++)
		cs.cry_master[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.cry_salt_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.cry_salt_length; i++)
		cs.cry_salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.cry_rounds = atoi(p);
	p = strtokm(NULL, "$");
	cs.ckey_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.ckey_length; i++)
		cs.ckey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.public_key_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.public_key_length; i++)
		cs.public_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Install the salt chosen by the cracker core for the next crypt_all() pass. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Core KDF + trial decryption for all queued candidates.
 * Wallet KDF: key_iv = SHA512(password || cry_salt), then cry_rounds-1 further
 * SHA512 self-iterations. Bytes 0..31 of the result are the AES-256 key and
 * bytes 32..47 the CBC IV (see the non-SIMD branch: key_iv + 32).
 * A candidate "cracks" when the decrypted cry_master ends in valid padding
 * such that check_pkcs_pad(...) == 32 — presumably meaning the unpadded master
 * key is 32 bytes; confirm against check_pkcs_pad()'s contract in jumbo. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
	/* NOTE(review): without _OPENMP the for-loop header is compiled out and the
	 * block below runs exactly once with index == 0; in that build
	 * MIN/MAX_KEYS_PER_CRYPT are 1, so count is presumably 1 — verify. */
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char output[SZ];
		SHA512_CTX sha_ctx;
		int i;
#ifdef SIMD_COEF_64
		/* Manually aligned SIMD work buffer holding MAX_KEYS_PER_CRYPT
		 * interleaved SHA-512 blocks (lane-interleaved by SIMD_COEF_64). */
		char unaligned_buf[MAX_KEYS_PER_CRYPT*SHA_BUF_SIZ*sizeof(uint64_t)+MEM_ALIGN_SIMD];
		uint64_t *key_iv = (uint64_t*)mem_align(unaligned_buf, MEM_ALIGN_SIMD);
		JTR_ALIGN(8) unsigned char hash1[SHA512_DIGEST_LENGTH]; // 512 bits
		int index2;
		for (index2 = 0; index2 < MAX_KEYS_PER_CRYPT; index2++) {
			// The first hash for this password
			SHA512_Init(&sha_ctx);
			SHA512_Update(&sha_ctx, saved_key[index+index2], strlen(saved_key[index+index2]));
			SHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);
			SHA512_Final(hash1, &sha_ctx);
			// Now copy and convert hash1 from flat into SIMD_COEF_64 buffers.
			for (i = 0; i < SHA512_DIGEST_LENGTH/sizeof(uint64_t); ++i) {
#if COMMON_DIGEST_FOR_OPENSSL
				key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = sha_ctx.hash[i]; // this is in BE format
#else
				key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = sha_ctx.h[i];
#endif
			}
			// We need to set ONE time, the upper half of the data buffer. We put the 0x80 byte (in BE format), at offset
			// 512-bits (SHA512_DIGEST_LENGTH) multiplied by the SIMD_COEF_64 (same as MAX_KEYS_PER_CRYPT), then zero
			// out the rest of the buffer, putting 512 (#bits) at the end. Once this part of the buffer is set up, we never
			// touch it again, for the rest of the crypt. We simply overwrite the first half of this buffer, over and over
			// again, with BE results of the prior hash.
			key_iv[ SHA512_DIGEST_LENGTH/sizeof(uint64_t) * SIMD_COEF_64 + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64 ] = 0x8000000000000000ULL;
			for (i = (SHA512_DIGEST_LENGTH/sizeof(uint64_t)+1); i < 15; i++)
				key_iv[i*SIMD_COEF_64 + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = 0;
			key_iv[15*SIMD_COEF_64 + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (SHA512_DIGEST_LENGTH << 3);
		}
		for (i = 1; i < cur_salt->cry_rounds; i++) // start at 1; the first iteration is already done
			SIMDSHA512body(key_iv, key_iv, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
		for (index2 = 0; index2 < MAX_KEYS_PER_CRYPT; index2++) {
			AES_KEY aes_key;
			unsigned char key[32];
			unsigned char iv[16];
			// Copy and convert from SIMD_COEF_64 buffers back into flat buffers, in little-endian
#if ARCH_LITTLE_ENDIAN==1
			for (i = 0; i < sizeof(key)/sizeof(uint64_t); i++) // the derived key
				((uint64_t *)key)[i] = JOHNSWAP64(key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]);
			for (i = 0; i < sizeof(iv)/sizeof(uint64_t); i++) // the derived iv
				((uint64_t *)iv)[i] = JOHNSWAP64(key_iv[SIMD_COEF_64*(sizeof(key)/sizeof(uint64_t) + i) + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]);
#else
			for (i = 0; i < sizeof(key)/sizeof(uint64_t); i++) // the derived key
				((uint64_t *)key)[i] = key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
			for (i = 0; i < sizeof(iv)/sizeof(uint64_t); i++) // the derived iv
				((uint64_t *)iv)[i] = key_iv[SIMD_COEF_64*(sizeof(key)/sizeof(uint64_t) + i) + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
#endif
			AES_set_decrypt_key(key, 256, &aes_key);
			AES_cbc_encrypt(cur_salt->cry_master, output, cur_salt->cry_master_length, &aes_key, iv, AES_DECRYPT);
			if (check_pkcs_pad(output, cur_salt->cry_master_length, 16) == 32) {
				cracked[index + index2] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
#else
		AES_KEY aes_key;
		unsigned char key_iv[SHA512_DIGEST_LENGTH]; // buffer for both the derived key and iv
		SHA512_Init(&sha_ctx);
		SHA512_Update(&sha_ctx, saved_key[index], strlen(saved_key[index]));
		SHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);
		SHA512_Final(key_iv, &sha_ctx);
		for (i = 1; i < cur_salt->cry_rounds; i++) { // start at 1; the first iteration is already done
			SHA512_Init(&sha_ctx);
			SHA512_Update(&sha_ctx, key_iv, SHA512_DIGEST_LENGTH);
			SHA512_Final(key_iv, &sha_ctx);
		}
		AES_set_decrypt_key(key_iv, 256, &aes_key);
		AES_cbc_encrypt(cur_salt->cry_master, output, cur_salt->cry_master_length, &aes_key, key_iv + 32, AES_DECRYPT);
		if (check_pkcs_pad(output, cur_salt->cry_master_length, 16) == 32) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
#endif
	}
	return count;
}

/* This format has no binary; comparisons reduce to the cracked[] flags. */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return cracked[index];
}

/* Store one candidate password (NUL-terminated, truncated to buffer size). */
static void bitcoin_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Tunable-cost report used by the "iteration count" column. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;
	my_salt = salt;
	return (unsigned int)my_salt->cry_rounds;
}

struct fmt_main fmt_bitcoin = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		bitcoin_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		bitcoin_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
residualbased_newton_raphson_mpc_contact_strategy.h
// KRATOS  ___|  |                   |                   |
//       \___ \  __|  __| |   |  __| __| |   |  __| _` | |
//             | |   |    |   | (    |   |   | |   (   | |
//       _____/ \__|_|   \__,_|\___|\__|\__,_|_|  \__,_|_| MECHANICS
//
//  License:		 BSD License
//					 license: StructuralMechanicsApplication/license.txt
//
//  Main authors:    Vicente Mataix Ferrandiz
//

#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY

/* System Includes */

/* External Includes */

/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"

// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"

// Contact criteria
#include "custom_strategies/custom_convergencecriterias/mpc_contact_criteria.h"

// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"
#include "utilities/atomic_utilities.h"

// // Processes
// #include "processes/fast_transfer_between_model_parts_process.h"

namespace Kratos {
///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @class ResidualBasedNewtonRaphsonMPCContactStrategy
 * @ingroup ContactStructuralMechanicsApplication
 * @brief Contact Newton Raphson class
 * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems
 * @author Vicente Mataix Ferrandiz
 */
template<class TSparseSpace,
         class TDenseSpace, // = DenseSpace<double>,
         class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
         >
class ResidualBasedNewtonRaphsonMPCContactStrategy :
    public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
    ///@name Type Definitions
    ///@{

    /** Counted pointer of ClassName */
    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonMPCContactStrategy );

    // Base strategy types
    typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>                  StrategyBaseType;
    typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>       BaseType;

    // Convergence criteria types (the MPC contact criteria is created internally)
    typedef ConvergenceCriteria<TSparseSpace, TDenseSpace>                     TConvergenceCriteriaType;
    typedef MPCContactCriteria<TSparseSpace, TDenseSpace>                      TMPCContactCriteriaType;

    // Types re-exported from the base Newton-Raphson strategy
    typedef typename BaseType::TBuilderAndSolverType                              TBuilderAndSolverType;
    typedef typename BaseType::TDataType                                                      TDataType;
    typedef TSparseSpace                                                                SparseSpaceType;
    typedef typename BaseType::TSchemeType                                                  TSchemeType;
    typedef typename BaseType::DofsArrayType                                              DofsArrayType;
    typedef typename BaseType::TSystemMatrixType                                      TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType                                      TSystemVectorType;
    typedef typename BaseType::LocalSystemVectorType                              LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType                              LocalSystemMatrixType;
    typedef typename BaseType::TSystemMatrixPointerType                        TSystemMatrixPointerType;
    typedef typename BaseType::TSystemVectorPointerType                        TSystemVectorPointerType;

    // Container types
    typedef ModelPart::NodesContainerType                                                NodesArrayType;
    typedef ModelPart::ElementsContainerType                                          ElementsArrayType;
    typedef ModelPart::ConditionsContainerType                                      ConditionsArrayType;
    typedef ModelPart::MasterSlaveConstraintContainerType                           ConstraintArrayType;

    // Index/size types
    typedef std::size_t                                                                       IndexType;
    typedef std::size_t                                                                        SizeType;

    /**
     * @brief Default constructor (scheme + convergence criteria + builder-and-solver)
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param pNewBuilderAndSolver The builder and solver employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     * @param ThisParameters Extra settings (validated against GetDefaultParameters())
     */
    ResidualBasedNewtonRaphsonMPCContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
        )
        : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
        mThisParameters(ThisParameters)
    {
        KRATOS_TRY;

        // We create the contact criteria
        mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();

        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * @brief Default constructor (scheme + linear solver + convergence criteria)
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     * @param ThisParameters Extra settings (validated against GetDefaultParameters())
     */
    ResidualBasedNewtonRaphsonMPCContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
        )
        : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
        mThisParameters(ThisParameters)
    {
        KRATOS_TRY;

        // We create the contact criteria
        mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();

        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * @brief Default constructor (scheme + linear solver + convergence criteria + builder-and-solver)
     * @param rModelPart The model part of the problem
     * @param pScheme The integration scheme
     * @param pNewLinearSolver The linear solver employed
     * @param pNewConvergenceCriteria The convergence criteria employed
     * @param pNewBuilderAndSolver The builder and solver employed
     * @param MaxIterations The maximum number of iterations
     * @param CalculateReactions The flag for the reaction calculation
     * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
     * @param MoveMeshFlag The flag that allows to move the mesh
     * @param ThisParameters Extra settings (validated against GetDefaultParameters())
     */
    ResidualBasedNewtonRaphsonMPCContactStrategy(
        ModelPart& rModelPart,
        typename TSchemeType::Pointer pScheme,
        typename TLinearSolver::Pointer pNewLinearSolver,
        typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
        typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
        IndexType MaxIterations = 30,
        bool CalculateReactions = false,
        bool ReformDofSetAtEachStep = false,
        bool MoveMeshFlag = false,
        Parameters ThisParameters = Parameters(R"({})")
        )
        : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
        mThisParameters(ThisParameters)
    {
        KRATOS_TRY;

        // We create the contact criteria
        mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>();

        Parameters default_parameters = GetDefaultParameters();
        mThisParameters.ValidateAndAssignDefaults(default_parameters);

        KRATOS_CATCH("");
    }

    /**
     * Destructor.
     */
    ~ResidualBasedNewtonRaphsonMPCContactStrategy() override = default;

    //******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************//
    //***********************************************************************************//

    /**
     * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
     * values of the solution step of interest are assumed equal to the old values.
     * @details After the base prediction, one build-and-solve is performed (with zeroed
     * system) and the MPC contact criteria's PostCriteria is evaluated once, with the
     * regular convergence-criteria echo level temporarily silenced.
     */
    void Predict() override
    {
        KRATOS_TRY

        BaseType::Predict();

        // Getting model part
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();

        // We get the system
        TSystemMatrixType& rA = *BaseType::mpA;
        TSystemVectorType& rDx = *BaseType::mpDx;
        TSystemVectorType& rb = *BaseType::mpb;

        // We solve the system in order to check the active set once
        TSparseSpace::SetToZero(rA);
        TSparseSpace::SetToZero(rDx);
        TSparseSpace::SetToZero(rb);
        typename TSchemeType::Pointer p_scheme = BaseType::GetScheme();
        typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver();
        p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb);

        // Check active set (silence the base criteria echo while doing so)
        const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel();
        BaseType::mpConvergenceCriteria->SetEchoLevel(0);
        mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);
        BaseType::mpConvergenceCriteria->SetEchoLevel(echo_level_convergence_criteria);

        KRATOS_CATCH("")
    }

    /**
     * @brief Initialization of member variables and prior operations
     * @details Computes the nodal weights before delegating to the base initialization.
     * ComputeNodalWeights() is declared elsewhere in this class (outside this excerpt).
     */
    void Initialize() override
    {
        KRATOS_TRY;

        // Computing nodal weights
        ComputeNodalWeights();

        BaseType::Initialize();

        KRATOS_CATCH("");
    }

    /**
     * @brief The problem of interest is solved.
     * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
     * SolveSolutionStep() and FinalizeSolutionStep().
     * All those functions can otherwise be called separately.
     */
    double Solve() override
    {
        this->Initialize();
        this->InitializeSolutionStep();
        this->Predict();
        this->SolveSolutionStep();
        this->FinalizeSolutionStep(); // TODO: Comment for proper work of interaction

        // NOTE(review): always returns 0.0 — the norm of Dx is not reported here.
        return 0.0;
    }

    /**
     * @brief Performs all the required operations that should be done (for each step)
     * before solving the solution step.
     * @details A member variable should be used as a flag to make sure this function is called only once per step.
     * Nodal weights are recomputed first (ComputeNodalWeights() is declared elsewhere in this class).
     */
    void InitializeSolutionStep() override
    {
        // Computing nodal weights
        ComputeNodalWeights();

        BaseType::InitializeSolutionStep();

        // // If enforcing NTN
        // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
        // if (enforce_ntn) {
        //     EnforcingNTN();
        // }
    }

    /**
     * @brief Performs all the required operations that should be done (for each step)
     * after solving the solution step.
     */
    void FinalizeSolutionStep() override
    {
        KRATOS_TRY;

        BaseType::FinalizeSolutionStep();

        KRATOS_CATCH("");
    }

    /**
     * @brief Solves the current step.
     * @details This function returns true if a solution has been found, false otherwise.
     * When the process info carries the INTERACTION flag, an outer loop of up to
     * "inner_loop_iterations" passes wraps AuxiliarSolveSolutionStep(), re-checking
     * convergence with the MPC contact criteria after each pass; otherwise a single
     * auxiliar solve is performed.
     */
    bool SolveSolutionStep() override
    {
        KRATOS_TRY;

        bool is_converged = false;

        // Getting model part
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();

        // We get the process info
        ProcessInfo& r_process_info = r_model_part.GetProcessInfo();

        if (r_process_info.Is(INTERACTION)) {
            // We get the system
            TSystemMatrixType& rA = *BaseType::mpA;
            TSystemVectorType& rDx = *BaseType::mpDx;
            TSystemVectorType& rb = *BaseType::mpb;

            int inner_iteration = 0;
            const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel();
            while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
                ++inner_iteration;

                if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) {
                    KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << inner_iteration << std::endl;
                }

                // We solve one loop
                r_process_info[NL_ITERATION_NUMBER] = 1;
                is_converged = AuxiliarSolveSolutionStep();

                // We check the convergence
                if (r_process_info[NL_ITERATION_NUMBER] == 1) r_process_info[NL_ITERATION_NUMBER] = 2; // Trigger check
                is_converged = mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);

                if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) {
                    if (is_converged) KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
                    else KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
                }
            }
        } else {
            is_converged = AuxiliarSolveSolutionStep();
        }

        return is_converged;

        KRATOS_CATCH("");
    }

    /**
     * @brief Solves the current step. This function returns true if a solution has been found, false otherwise.
 * (auxiliar method)
 * @details Full Newton-Raphson loop: build/solve, update database, convergence check,
 * iterating up to mMaxIterationNumber. Nodal weights are recomputed before each
 * nonlinear iteration because the contact contributions depend on them.
 * @return True if the convergence criteria were satisfied, false otherwise
 */
bool AuxiliarSolveSolutionStep()
{
    // Getting flag INTERACTION
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    const bool update_each_nl_iteration = mThisParameters["update_each_nl_iteration"].GetBool();
    VariableUtils().SetFlag(INTERACTION, update_each_nl_iteration, r_model_part.GetSubModelPart("ComputingContact").Conditions());

    // Pointers needed in the solution
    typename TSchemeType::Pointer p_scheme = this->GetScheme();
    typename TBuilderAndSolverType::Pointer p_builder_and_solver = this->GetBuilderAndSolver();
    auto& r_dof_set = p_builder_and_solver->GetDofSet();
    TSystemMatrixType& rA = *BaseType::mpA;
    TSystemVectorType& rDx = *BaseType::mpDx;
    TSystemVectorType& rb = *BaseType::mpb;

    // Initializing the parameters of the Newton-Raphson cycle
    unsigned int iteration_number = 1;
    r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;
    bool is_converged = false;
    bool residual_is_updated = false;

    // Computing nodal weights
    ComputeNodalWeights();

    p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
    BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);
    is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);

    // // If enforcing NTN
    // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
    // if (enforce_ntn) {
    //     EnforcingNTN();
    // }

    // Function to perform the building and the solving phase.
    // A full BuildAndSolve is done when a rebuild is requested or the stiffness matrix
    // has not been built yet; otherwise only the RHS is rebuilt.
    if (StrategyBaseType::mRebuildLevel > 0 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
        TSparseSpace::SetToZero(rA);
        TSparseSpace::SetToZero(rDx);
        TSparseSpace::SetToZero(rb);
        p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
    } else {
        TSparseSpace::SetToZero(rDx); //Dx=0.00;
        TSparseSpace::SetToZero(rb);
        p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
    }

    // Debugging info
    BaseType::EchoInfo(iteration_number);

    // Updating the results stored in the database
    BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());

    p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
    BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

    // Calculate reactions if required
    if (BaseType::mCalculateReactionsFlag)
        p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

    if (is_converged) {
        if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
            TSparseSpace::SetToZero(rb);
            p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
        }
        is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
    }

    // Iteration Cycle... performed only for NonLinearProblems
    while (!is_converged && iteration_number++ < BaseType::mMaxIterationNumber) {
        // Setting the number of iteration
        r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number;

        // Computing nodal weights
        ComputeNodalWeights();

        // Calling InitializeNonLinIteration
        p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb);
        BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

        // Shaping correctly the system: when the contact pairs are updated every nonlinear
        // iteration, the DoF set and system vectors must be rebuilt to match.
        if (update_each_nl_iteration) {
            p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part);
            p_builder_and_solver->SetUpSystem(r_model_part);
            p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, BaseType::mpA, BaseType::mpDx, BaseType::mpb, r_model_part);
        }

        is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);

        // Call the linear system solver to find the correction mDx for the it is not called if there is no system to solve
        if (SparseSpaceType::Size(rDx) != 0) {
            if (StrategyBaseType::mRebuildLevel > 1 || !StrategyBaseType::mStiffnessMatrixIsBuilt) {
                if (!BaseType::GetKeepSystemConstantDuringIterations()) {
                    //A = 0.00;
                    TSparseSpace::SetToZero(rA);
                    TSparseSpace::SetToZero(rDx);
                    TSparseSpace::SetToZero(rb);
                    p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                } else {
                    TSparseSpace::SetToZero(rDx);
                    TSparseSpace::SetToZero(rb);
                    p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
                }
            } else {
                TSparseSpace::SetToZero(rDx);
                TSparseSpace::SetToZero(rb);
                p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb);
            }
        } else {
            KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! " << std::endl;
        }

        // Debugging info
        BaseType::EchoInfo(iteration_number);

        // Updating the results stored in the database
        BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag());

        p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb);
        BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb);

        residual_is_updated = false;

        // Calculate reactions if required
        if (BaseType::mCalculateReactionsFlag)
            p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

        if (is_converged) {
            if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
                TSparseSpace::SetToZero(rb);
                p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb);
                residual_is_updated = true;
            }
            is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb);
        }
    }

    // Plots a warning if the maximum number of iterations is exceeded
    if (iteration_number >= BaseType::mMaxIterationNumber) {
        BaseType::MaxIterationsExceeded();
    } else {
        KRATOS_INFO_IF("NR-Strategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << BaseType::mMaxIterationNumber << " iterations" << std::endl;
    }

    // Recalculate residual if needed (note that some convergence criteria need it to be recalculated)
    if (!residual_is_updated) {
        // NOTE:
        // The following part will be commented because it is time consuming
        // and there is no obvious reason to be here. If someone need this
        // part please notify the community via mailing list before uncommenting it.
        // Pooyan.

        // TSparseSpace::SetToZero(mb);
        // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb);
    }

    // Calculate reactions if required
    if (BaseType::mCalculateReactionsFlag)
        p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb);

    return is_converged;
}

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Input and output
///@{

///@}
///@name Friends
///@{

protected:

///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

Parameters mThisParameters;                                      /// The configuration parameters
typename TConvergenceCriteriaType::Pointer mpMPCContactCriteria; /// The contact criteria

///@}
///@name Protected Operators
///@{

/**
 * @brief This method returns the default parameters in order to avoid code duplication
 * @return Returns the default parameters
 */
Parameters GetDefaultParameters() const override
{
    Parameters default_parameters = Parameters(R"( { "inner_loop_iterations" : 5, "update_each_nl_iteration" : false, "enforce_ntn" : false })" );

    return default_parameters;
}

///@}
///@name Protected Operations
///@{

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{
///@{

/**
 * Copy constructor.
 */
// NOTE(review): copy construction is intentionally a no-op here (members are not copied);
// confirm whether this should be deleted instead.
ResidualBasedNewtonRaphsonMPCContactStrategy(const ResidualBasedNewtonRaphsonMPCContactStrategy& Other)
{
};

private:

///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

// /**
//  * @brief This inforces NTN formulation
//  */
// void EnforcingNTN()
// {
//     // List of enforced nodes to not repeat
//     std::unordered_set<IndexType> enforced_nodes;
//
//     // Getting contact model part
//     ModelPart& r_root_model_part = StrategyBaseType::GetModelPart().GetRootModelPart();
//     ModelPart& r_computing_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("ComputingContact");
//
//     // The process info
//     const auto& r_process_info = r_root_model_part.GetProcessInfo();
//
//     // Reset the pointers of the conditions
//     for (auto& r_cond : r_computing_contact_model_part.Conditions()) {
//         if (r_cond.Has(CONSTRAINT_POINTER)) {
//             r_cond.SetValue(CONSTRAINT_POINTER, nullptr);
//         }
//     }
//
//     // Iterate over the constraints
//     IndexType counter = 1;
//     for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
//         r_const.SetId(counter);
//         ++counter;
//     }
//
//     // Auxiliar classes
//     Matrix original_relation_matrix, relation_matrix;
//     Vector original_constant_vector, constant_vector;
//     ModelPart::DofsVectorType original_master_dofs, master_dofs, original_slave_dofs, slave_dofs;
//
//     // Iterate over the constraints
//     for (auto& r_const : r_computing_contact_model_part.MasterSlaveConstraints()) {
//         // Getting original system
//         r_const.GetLocalSystem(original_relation_matrix, original_constant_vector, r_process_info);
//         r_const.GetDofList(original_slave_dofs, original_master_dofs, r_process_info);
//
//         // TODO: Finish rebuild
//
//         // Creating new constraint
//         r_root_model_part.CreateNewMasterSlaveConstraint("LinearMasterSlaveConstraint", counter, master_dofs, slave_dofs, relation_matrix, constant_vector);
//
//         // Setting to remove the old constraints
//         r_const.Set(TO_ERASE, true);
//
//         ++counter;
//     }
//
//     // Remove old constraints
//     r_root_model_part.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE);
//
//     // Transfer constraints from the root to the computing model part
//     FastTransferBetweenModelPartsProcess(r_computing_contact_model_part, r_root_model_part, FastTransferBetweenModelPartsProcess::EntityTransfered::CONSTRAINTS).Execute();
//
//     // Reorder ids
//     counter = 1;
//     for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) {
//         r_const.SetId(counter);
//         ++counter;
//     }
// }

/**
 * @brief This computes the nodal weights
 * @details For every slave contact condition, accumulates on each of its nodes the pair
 * count (NODAL_PAUX) and the lumped area contribution (NODAL_MAUX = lumping factor times
 * condition domain size). Values are reset to zero first, and accumulation is done
 * atomically because the loop over conditions runs in parallel.
 */
void ComputeNodalWeights()
{
    // Getting contact model part
    ModelPart& r_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("Contact");

    // Reset the NODAL_PAUX and NODAL_MAUX
    auto& r_nodes_array = r_contact_model_part.Nodes();
    VariableUtils().SetNonHistoricalVariableToZero(NODAL_PAUX, r_nodes_array);
    VariableUtils().SetNonHistoricalVariableToZero(NODAL_MAUX, r_nodes_array);

    // We set the constraints active and inactive in function of the active set
    auto& r_conditions_array = r_contact_model_part.Conditions();
    auto it_cond_begin = r_conditions_array.begin();

    // If enforcing NTN (currently hard-wired off; the parameter-driven variant is kept for reference)
    const bool enforce_ntn = false;
    // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool();
    // if (enforce_ntn) {
    //     VariableUtils().SetNonHistoricalVariable(NODAL_PAUX, 1.0, r_nodes_array);
    // }

    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
        auto it_cond = it_cond_begin + i;

        // Only slave conditions
        if (it_cond->Is(SLAVE)) {
            auto& r_geometry = it_cond->GetGeometry();
            Vector lumping_factor;
            lumping_factor = r_geometry.LumpingFactors(lumping_factor);
            const double domain_size = r_geometry.DomainSize();
            for (IndexType i_node = 0; i_node < r_geometry.size(); ++i_node) {
                auto& r_node = r_geometry[i_node];
                if (!enforce_ntn) {
                    AtomicAdd(r_node.GetValue(NODAL_PAUX), 1.0);
                }
                AtomicAdd(r_node.GetValue(NODAL_MAUX), lumping_factor[i_node] * domain_size);
            }
        }
    }
}

///@}
///@name Private Access
///@{

///@}

///@}
///@name Serialization
///@{

///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

///@}
}; /* Class ResidualBasedNewtonRaphsonMPCContactStrategy */

///@}

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

///@}
}  // namespace Kratos

#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY */
omp-simd-clone.c
/* OMP constructs' SIMD clone supporting code. Copyright (C) 2005-2020 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "target.h" #include "tree.h" #include "gimple.h" #include "cfghooks.h" #include "alloc-pool.h" #include "tree-pass.h" #include "ssa.h" #include "cgraph.h" #include "pretty-print.h" #include "diagnostic-core.h" #include "fold-const.h" #include "stor-layout.h" #include "cfganal.h" #include "gimplify.h" #include "gimple-iterator.h" #include "gimplify-me.h" #include "gimple-walk.h" #include "langhooks.h" #include "tree-cfg.h" #include "tree-into-ssa.h" #include "tree-dfa.h" #include "cfgloop.h" #include "symbol-summary.h" #include "ipa-param-manipulation.h" #include "tree-eh.h" #include "varasm.h" #include "stringpool.h" #include "attribs.h" #include "omp-simd-clone.h" /* Return the number of elements in vector type VECTYPE, which is associated with a SIMD clone. At present these always have a constant length. */ static unsigned HOST_WIDE_INT simd_clone_subparts (tree vectype) { return TYPE_VECTOR_SUBPARTS (vectype).to_constant (); } /* Allocate a fresh `simd_clone' and return it. NARGS is the number of arguments to reserve space for. 
 */

static struct cgraph_simd_clone *
simd_clone_struct_alloc (int nargs)
{
  struct cgraph_simd_clone *clone_info;
  /* The arg array is allocated inline at the tail of the struct
     (flexible-array style), hence the combined size computation.  */
  size_t len = (sizeof (struct cgraph_simd_clone)
                + nargs * sizeof (struct cgraph_simd_clone_arg));
  clone_info = (struct cgraph_simd_clone *)
               ggc_internal_cleared_alloc (len);
  return clone_info;
}

/* Make a copy of the `struct cgraph_simd_clone' in FROM to TO.  */

static inline void
simd_clone_struct_copy (struct cgraph_simd_clone *to,
                        struct cgraph_simd_clone *from)
{
  memcpy (to, from, (sizeof (struct cgraph_simd_clone)
                     + ((from->nargs - from->inbranch)
                        * sizeof (struct cgraph_simd_clone_arg))));
}

/* Fill an empty vector ARGS with parameter types of function FNDECL.  This
   uses TYPE_ARG_TYPES if available, otherwise falls back to types of
   DECL_ARGUMENTS types.  */

static void
simd_clone_vector_of_formal_parm_types (vec<tree> *args, tree fndecl)
{
  if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
    {
      push_function_arg_types (args, TREE_TYPE (fndecl));
      return;
    }
  push_function_arg_decls (args, fndecl);
  unsigned int i;
  tree arg;
  FOR_EACH_VEC_ELT (*args, i, arg)
    (*args)[i] = TREE_TYPE ((*args)[i]);
}

/* Given a simd function in NODE, extract the simd specific
   information from the OMP clauses passed in CLAUSES, and return
   the struct cgraph_simd_clone * if it should be cloned.
   *INBRANCH_SPECIFIED is set to TRUE if the `inbranch' or
   `notinbranch' clause specified, otherwise set to FALSE.
   Returns NULL (after emitting a warning) for clause combinations
   that cannot be honoured.  */

static struct cgraph_simd_clone *
simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
                            bool *inbranch_specified)
{
  auto_vec<tree> args;
  simd_clone_vector_of_formal_parm_types (&args, node->decl);
  tree t;
  int n;
  *inbranch_specified = false;

  n = args.length ();
  /* Drop the trailing void_type_node that marks a non-varargs list.  */
  if (n > 0 && args.last () == void_type_node)
    n--;

  /* Allocate one more than needed just in case this is an in-branch
     clone which will require a mask argument.  */
  struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
  clone_info->nargs = n;

  if (!clauses)
    goto out;

  clauses = TREE_VALUE (clauses);
  if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
    goto out;

  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      switch (OMP_CLAUSE_CODE (t))
        {
        case OMP_CLAUSE_INBRANCH:
          clone_info->inbranch = 1;
          *inbranch_specified = true;
          break;
        case OMP_CLAUSE_NOTINBRANCH:
          clone_info->inbranch = 0;
          *inbranch_specified = true;
          break;
        case OMP_CLAUSE_SIMDLEN:
          clone_info->simdlen
            = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
          break;
        case OMP_CLAUSE_LINEAR:
          {
            tree decl = OMP_CLAUSE_DECL (t);
            tree step = OMP_CLAUSE_LINEAR_STEP (t);
            int argno = TREE_INT_CST_LOW (decl);
            if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
              {
                /* Variable stride: linear_step holds the index of the
                   uniform argument that carries the step, not the step
                   itself.  */
                enum cgraph_simd_clone_arg_type arg_type;
                if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
                  switch (OMP_CLAUSE_LINEAR_KIND (t))
                    {
                    case OMP_CLAUSE_LINEAR_REF:
                      arg_type
                        = SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP;
                      break;
                    case OMP_CLAUSE_LINEAR_UVAL:
                      arg_type
                        = SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP;
                      break;
                    case OMP_CLAUSE_LINEAR_VAL:
                    case OMP_CLAUSE_LINEAR_DEFAULT:
                      arg_type
                        = SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP;
                      break;
                    default:
                      gcc_unreachable ();
                    }
                else
                  arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
                clone_info->args[argno].arg_type = arg_type;
                clone_info->args[argno].linear_step = tree_to_shwi (step);
                gcc_assert (clone_info->args[argno].linear_step >= 0
                            && clone_info->args[argno].linear_step < n);
              }
            else
              {
                if (POINTER_TYPE_P (args[argno]))
                  step = fold_convert (ssizetype, step);
                if (!tree_fits_shwi_p (step))
                  {
                    warning_at (OMP_CLAUSE_LOCATION (t), 0,
                                "ignoring large linear step");
                    return NULL;
                  }
                else if (integer_zerop (step))
                  {
                    warning_at (OMP_CLAUSE_LOCATION (t), 0,
                                "ignoring zero linear step");
                    return NULL;
                  }
                else
                  {
                    enum cgraph_simd_clone_arg_type arg_type;
                    if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
                      switch (OMP_CLAUSE_LINEAR_KIND (t))
                        {
                        case OMP_CLAUSE_LINEAR_REF:
                          arg_type
                            = SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP;
                          break;
                        case OMP_CLAUSE_LINEAR_UVAL:
                          arg_type
                            = SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP;
                          break;
                        case OMP_CLAUSE_LINEAR_VAL:
                        case OMP_CLAUSE_LINEAR_DEFAULT:
                          arg_type
                            = SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP;
                          break;
                        default:
                          gcc_unreachable ();
                        }
                    else
                      arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
                    clone_info->args[argno].arg_type = arg_type;
                    clone_info->args[argno].linear_step = tree_to_shwi (step);
                  }
              }
            break;
          }
        case OMP_CLAUSE_UNIFORM:
          {
            tree decl = OMP_CLAUSE_DECL (t);
            int argno = tree_to_uhwi (decl);
            clone_info->args[argno].arg_type = SIMD_CLONE_ARG_TYPE_UNIFORM;
            break;
          }
        case OMP_CLAUSE_ALIGNED:
          {
            /* Ignore aligned (x) for declare simd, for the ABI we really
               need an alignment specified.  */
            if (OMP_CLAUSE_ALIGNED_ALIGNMENT (t) == NULL_TREE)
              break;
            tree decl = OMP_CLAUSE_DECL (t);
            int argno = tree_to_uhwi (decl);
            clone_info->args[argno].alignment
              = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
            break;
          }
        default:
          break;
        }
    }

 out:
  if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (node->decl))))
    {
      warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
                  "ignoring %<#pragma omp declare simd%> on function "
                  "with %<_Atomic%> qualified return type");
      return NULL;
    }

  for (unsigned int argno = 0; argno < clone_info->nargs; argno++)
    if (TYPE_ATOMIC (args[argno])
        && clone_info->args[argno].arg_type != SIMD_CLONE_ARG_TYPE_UNIFORM)
      {
        warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
                    "ignoring %<#pragma omp declare simd%> on function "
                    "with %<_Atomic%> qualified non-%<uniform%> argument");
        args.release ();
        return NULL;
      }

  return clone_info;
}

/* Given a SIMD clone in NODE, calculate the characteristic data
   type and return the corresponding type.  The characteristic data
   type is computed as described in the Intel Vector ABI.
 */

static tree
simd_clone_compute_base_data_type (struct cgraph_node *node,
                                   struct cgraph_simd_clone *clone_info)
{
  tree type = integer_type_node;
  tree fndecl = node->decl;

  /* a) For non-void function, the characteristic data type is the
        return type.  */
  if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
    type = TREE_TYPE (TREE_TYPE (fndecl));

  /* b) If the function has any non-uniform, non-linear parameters,
        then the characteristic data type is the type of the first
        such parameter.  */
  else
    {
      auto_vec<tree> map;
      simd_clone_vector_of_formal_parm_types (&map, fndecl);
      for (unsigned int i = 0; i < clone_info->nargs; ++i)
        if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
          {
            type = map[i];
            break;
          }
    }

  /* c) If the characteristic data type determined by a) or b) above
        is struct, union, or class type which is pass-by-value (except
        for the type that maps to the built-in complex data type), the
        characteristic data type is int.  */
  if (RECORD_OR_UNION_TYPE_P (type)
      && !aggregate_value_p (type, NULL)
      && TREE_CODE (type) != COMPLEX_TYPE)
    return integer_type_node;

  /* d) If none of the above three classes is applicable, the
        characteristic data type is int.  */

  return type;

  /* e) For Intel Xeon Phi native and offload compilation, if the
        resulting characteristic data type is 8-bit or 16-bit integer
        data type, the characteristic data type is int.  */
  /* Well, we don't handle Xeon Phi yet.  */
}

/* Build the Intel Vector ABI mangled name ("_ZGV<isa><mask><simdlen>
   <parm-kinds>_<orig-name>") for the SIMD clone described by
   CLONE_INFO of NODE.  Returns NULL_TREE if a clone with the same
   mangled name already exists on NODE.  */

static tree
simd_clone_mangle (struct cgraph_node *node,
                   struct cgraph_simd_clone *clone_info)
{
  char vecsize_mangle = clone_info->vecsize_mangle;
  char mask = clone_info->inbranch ? 'M' : 'N';
  unsigned int simdlen = clone_info->simdlen;
  unsigned int n;
  pretty_printer pp;

  gcc_assert (vecsize_mangle && simdlen);

  pp_string (&pp, "_ZGV");
  pp_character (&pp, vecsize_mangle);
  pp_character (&pp, mask);
  pp_decimal_int (&pp, simdlen);

  for (n = 0; n < clone_info->nargs; ++n)
    {
      struct cgraph_simd_clone_arg arg = clone_info->args[n];

      switch (arg.arg_type)
        {
        case SIMD_CLONE_ARG_TYPE_UNIFORM:
          pp_character (&pp, 'u');
          break;
        case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
          pp_character (&pp, 'l');
          goto mangle_linear;
        case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
          pp_character (&pp, 'R');
          goto mangle_linear;
        case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
          pp_character (&pp, 'L');
          goto mangle_linear;
        case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
          pp_character (&pp, 'U');
          goto mangle_linear;
        mangle_linear:
          gcc_assert (arg.linear_step != 0);
          /* A step of 1 is the default and is omitted; negative steps
             are encoded with an 'n' prefix and the magnitude.  */
          if (arg.linear_step > 1)
            pp_unsigned_wide_integer (&pp, arg.linear_step);
          else if (arg.linear_step < 0)
            {
              pp_character (&pp, 'n');
              pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
                                              arg.linear_step));
            }
          break;
        case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
          /* For variable steps, linear_step is the index of the uniform
             argument holding the step.  */
          pp_string (&pp, "ls");
          pp_unsigned_wide_integer (&pp, arg.linear_step);
          break;
        case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
          pp_string (&pp, "Rs");
          pp_unsigned_wide_integer (&pp, arg.linear_step);
          break;
        case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
          pp_string (&pp, "Ls");
          pp_unsigned_wide_integer (&pp, arg.linear_step);
          break;
        case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
          pp_string (&pp, "Us");
          pp_unsigned_wide_integer (&pp, arg.linear_step);
          break;
        default:
          pp_character (&pp, 'v');
        }
      if (arg.alignment)
        {
          pp_character (&pp, 'a');
          pp_decimal_int (&pp, arg.alignment);
        }
    }

  pp_underscore (&pp);
  const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
  if (*str == '*')
    ++str;
  pp_string (&pp, str);
  str = pp_formatted_text (&pp);

  /* If there already is a SIMD clone with the same mangled name, don't
     add another one.  This can happen e.g. for
        #pragma omp declare simd
        #pragma omp declare simd simdlen(8)
        int foo (int, int);
     if the simdlen is assumed to be 8 for the first one, etc.  */
  for (struct cgraph_node *clone = node->simd_clones; clone;
       clone = clone->simdclone->next_clone)
    if (id_equal (DECL_ASSEMBLER_NAME (clone->decl), str))
      return NULL_TREE;

  return get_identifier (str);
}

/* Create a simd clone of OLD_NODE and return it.  Returns NULL when
   OLD_NODE is a definition without a gimple body, or when the cgraph
   cloning machinery fails.  */

static struct cgraph_node *
simd_clone_create (struct cgraph_node *old_node)
{
  struct cgraph_node *new_node;
  if (old_node->definition)
    {
      if (!old_node->has_gimple_body_p ())
        return NULL;
      old_node->get_body ();
      new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
                                                           NULL, NULL,
                                                           "simdclone");
    }
  else
    {
      /* Declaration only: clone the decl by hand, there is no body to
         version.  */
      tree old_decl = old_node->decl;
      tree new_decl = copy_node (old_node->decl);
      DECL_NAME (new_decl) = clone_function_name_numbered (old_decl,
                                                           "simdclone");
      SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
      SET_DECL_RTL (new_decl, NULL);
      DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
      DECL_STATIC_DESTRUCTOR (new_decl) = 0;
      new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
      if (old_node->in_other_partition)
        new_node->in_other_partition = 1;
    }
  if (new_node == NULL)
    return new_node;

  set_decl_built_in_function (new_node->decl, NOT_BUILT_IN, 0);
  /* The clone must keep the linkage/visibility of the original symbol,
     copy all relevant decl flags over.  */
  TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
  DECL_COMDAT (new_node->decl) = DECL_COMDAT (old_node->decl);
  DECL_WEAK (new_node->decl) = DECL_WEAK (old_node->decl);
  DECL_EXTERNAL (new_node->decl) = DECL_EXTERNAL (old_node->decl);
  DECL_VISIBILITY_SPECIFIED (new_node->decl)
    = DECL_VISIBILITY_SPECIFIED (old_node->decl);
  DECL_VISIBILITY (new_node->decl) = DECL_VISIBILITY (old_node->decl);
  DECL_DLLIMPORT_P (new_node->decl) = DECL_DLLIMPORT_P (old_node->decl);
  if (DECL_ONE_ONLY (old_node->decl))
    make_decl_one_only (new_node->decl, DECL_ASSEMBLER_NAME (new_node->decl));

  /* The method cgraph_version_clone_with_body () will force the new
     symbol local.  Undo this, and inherit external visibility from
     the old node.  */
  new_node->local = old_node->local;
  new_node->externally_visible = old_node->externally_visible;
  new_node->calls_declare_variant_alt = old_node->calls_declare_variant_alt;

  return new_node;
}

/* Adjust the return type of the given function to its appropriate
   vector counterpart.  Returns a simd array to be used throughout the
   function as a return value.  */

static tree
simd_clone_adjust_return_type (struct cgraph_node *node)
{
  tree fndecl = node->decl;
  tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
  unsigned int veclen;
  tree t;

  /* Adjust the function return type.  */
  if (orig_rettype == void_type_node)
    return NULL_TREE;
  t = TREE_TYPE (TREE_TYPE (fndecl));
  if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
    veclen = node->simdclone->vecsize_int;
  else
    veclen = node->simdclone->vecsize_float;
  /* veclen starts as a size in bits; divide by the element size to get
     the number of lanes per hardware vector, capped at simdlen.  */
  veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (t));
  if (veclen > node->simdclone->simdlen)
    veclen = node->simdclone->simdlen;
  if (POINTER_TYPE_P (t))
    t = pointer_sized_int_node;
  if (veclen == node->simdclone->simdlen)
    t = build_vector_type (t, node->simdclone->simdlen);
  else
    {
      /* simdlen does not fit in one vector: return an array of
         simdlen/veclen vectors instead.  */
      t = build_vector_type (t, veclen);
      t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
    }
  TREE_TYPE (TREE_TYPE (fndecl)) = t;
  if (!node->definition)
    return NULL_TREE;

  t = DECL_RESULT (fndecl);
  /* Adjust the DECL_RESULT.  */
  gcc_assert (TREE_TYPE (t) != void_type_node);
  TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
  relayout_decl (t);

  tree atype = build_array_type_nelts (orig_rettype,
                                       node->simdclone->simdlen);
  if (veclen != node->simdclone->simdlen)
    return build1 (VIEW_CONVERT_EXPR, atype, t);

  /* Set up a SIMD array to use as the return value.  */
  tree retval = create_tmp_var_raw (atype, "retval");
  gimple_add_tmp_var (retval);
  return retval;
}

/* Each vector argument has a corresponding array to be used locally
   as part of the eventual loop.  Create such temporary array and
   return it.

   PREFIX is the prefix to be used for the temporary.
TYPE is the inner element type. SIMDLEN is the number of elements. */ static tree create_tmp_simd_array (const char *prefix, tree type, int simdlen) { tree atype = build_array_type_nelts (type, simdlen); tree avar = create_tmp_var_raw (atype, prefix); gimple_add_tmp_var (avar); return avar; } /* Modify the function argument types to their corresponding vector counterparts if appropriate. Also, create one array for each simd argument to be used locally when using the function arguments as part of the loop. NODE is the function whose arguments are to be adjusted. If NODE does not represent function definition, returns NULL. Otherwise returns an adjustment class that will be filled describing how the argument declarations will be remapped. New arguments which are not to be remapped are marked with USER_FLAG. */ static ipa_param_body_adjustments * simd_clone_adjust_argument_types (struct cgraph_node *node) { auto_vec<tree> args; if (node->definition) push_function_arg_decls (&args, node->decl); else simd_clone_vector_of_formal_parm_types (&args, node->decl); struct cgraph_simd_clone *sc = node->simdclone; vec<ipa_adjusted_param, va_gc> *new_params = NULL; vec_safe_reserve (new_params, sc->nargs); unsigned i, j, veclen; for (i = 0; i < sc->nargs; ++i) { ipa_adjusted_param adj; memset (&adj, 0, sizeof (adj)); tree parm = args[i]; tree parm_type = node->definition ? TREE_TYPE (parm) : parm; adj.base_index = i; adj.prev_clone_index = i; sc->args[i].orig_arg = node->definition ? parm : NULL_TREE; sc->args[i].orig_type = parm_type; switch (sc->args[i].arg_type) { default: /* No adjustment necessary for scalar arguments. 
*/ adj.op = IPA_PARAM_OP_COPY; break; case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP: case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP: if (node->definition) sc->args[i].simd_array = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)), TREE_TYPE (parm_type), sc->simdlen); adj.op = IPA_PARAM_OP_COPY; break; case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP: case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP: case SIMD_CLONE_ARG_TYPE_VECTOR: if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type)) veclen = sc->vecsize_int; else veclen = sc->vecsize_float; veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (parm_type)); if (veclen > sc->simdlen) veclen = sc->simdlen; adj.op = IPA_PARAM_OP_NEW; adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD; if (POINTER_TYPE_P (parm_type)) adj.type = build_vector_type (pointer_sized_int_node, veclen); else adj.type = build_vector_type (parm_type, veclen); sc->args[i].vector_type = adj.type; for (j = veclen; j < sc->simdlen; j += veclen) { vec_safe_push (new_params, adj); if (j == veclen) { memset (&adj, 0, sizeof (adj)); adj.op = IPA_PARAM_OP_NEW; adj.user_flag = 1; adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD; adj.base_index = i; adj.prev_clone_index = i; adj.type = sc->args[i].vector_type; } } if (node->definition) sc->args[i].simd_array = create_tmp_simd_array (DECL_NAME (parm) ? 
IDENTIFIER_POINTER (DECL_NAME (parm)) : NULL, parm_type, sc->simdlen); } vec_safe_push (new_params, adj); } if (sc->inbranch) { tree base_type = simd_clone_compute_base_data_type (sc->origin, sc); ipa_adjusted_param adj; memset (&adj, 0, sizeof (adj)); adj.op = IPA_PARAM_OP_NEW; adj.user_flag = 1; adj.param_prefix_index = IPA_PARAM_PREFIX_MASK; adj.base_index = i; adj.prev_clone_index = i; if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type)) veclen = sc->vecsize_int; else veclen = sc->vecsize_float; veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (base_type)); if (veclen > sc->simdlen) veclen = sc->simdlen; if (sc->mask_mode != VOIDmode) adj.type = lang_hooks.types.type_for_mode (sc->mask_mode, 1); else if (POINTER_TYPE_P (base_type)) adj.type = build_vector_type (pointer_sized_int_node, veclen); else adj.type = build_vector_type (base_type, veclen); vec_safe_push (new_params, adj); for (j = veclen; j < sc->simdlen; j += veclen) vec_safe_push (new_params, adj); /* We have previously allocated one extra entry for the mask. Use it and fill it. 
*/ sc->nargs++; if (sc->mask_mode != VOIDmode) base_type = boolean_type_node; if (node->definition) { sc->args[i].orig_arg = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type); if (sc->mask_mode == VOIDmode) sc->args[i].simd_array = create_tmp_simd_array ("mask", base_type, sc->simdlen); else if (veclen < sc->simdlen) sc->args[i].simd_array = create_tmp_simd_array ("mask", adj.type, sc->simdlen / veclen); else sc->args[i].simd_array = NULL_TREE; } sc->args[i].orig_type = base_type; sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK; } if (node->definition) { ipa_param_body_adjustments *adjustments = new ipa_param_body_adjustments (new_params, node->decl); adjustments->modify_formal_parameters (); return adjustments; } else { tree new_arg_types = NULL_TREE, new_reversed; bool last_parm_void = false; if (args.length () > 0 && args.last () == void_type_node) last_parm_void = true; gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl))); j = vec_safe_length (new_params); for (i = 0; i < j; i++) { struct ipa_adjusted_param *adj = &(*new_params)[i]; tree ptype; if (adj->op == IPA_PARAM_OP_COPY) ptype = args[adj->base_index]; else ptype = adj->type; new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types); } new_reversed = nreverse (new_arg_types); if (last_parm_void) { if (new_reversed) TREE_CHAIN (new_arg_types) = void_list_node; else new_reversed = void_list_node; } TYPE_ARG_TYPES (TREE_TYPE (node->decl)) = new_reversed; return NULL; } } /* Initialize and copy the function arguments in NODE to their corresponding local simd arrays. Returns a fresh gimple_seq with the instruction sequence generated. 
*/

/* Initialize and copy the function arguments in NODE to their
   corresponding local simd arrays.  Returns a fresh gimple_seq with
   the instruction sequence generated.  I and J track the position in
   the simd-clone argument array and in the adjusted parameter vector
   respectively; they diverge when one logical argument was split into
   several vector parameters.  */

static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
			     ipa_param_body_adjustments *adjustments)
{
  gimple_seq seq = NULL;
  unsigned i = 0, j = 0, k;

  for (tree arg = DECL_ARGUMENTS (node->decl);
       arg;
       arg = DECL_CHAIN (arg), i++, j++)
    {
      /* Arguments copied verbatim and pointer arguments need no
	 per-lane simd array.  */
      if ((*adjustments->m_adj_params)[j].op == IPA_PARAM_OP_COPY
	  || POINTER_TYPE_P (TREE_TYPE (arg)))
	continue;

      node->simdclone->args[i].vector_arg = arg;

      tree array = node->simdclone->args[i].simd_array;
      if (node->simdclone->mask_mode != VOIDmode
	  && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK)
	{
	  /* Integer masks: copy each incoming mask parameter into the
	     corresponding element of the mask simd array (if any).  */
	  if (array == NULL_TREE)
	    continue;
	  unsigned int l
	    = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (array))));
	  for (k = 0; k <= l; k++)
	    {
	      if (k)
		{
		  /* Each extra mask element has its own PARM_DECL.  */
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      tree t = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (array)),
			       array, size_int (k), NULL, NULL);
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	  continue;
	}
      if (simd_clone_subparts (TREE_TYPE (arg)) == node->simdclone->simdlen)
	{
	  /* One vector parameter covers the whole simdlen: store it
	     into the array with a single vector-typed MEM_REF.  */
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  tree ptr = build_fold_addr_expr (array);
	  tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			   build_int_cst (ptype, 0));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	  gimplify_and_add (t, &seq);
	}
      else
	{
	  /* The argument was split across several vector parameters;
	     store each chunk at its byte offset in the array.  */
	  unsigned int simdlen = simd_clone_subparts (TREE_TYPE (arg));
	  tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
	  for (k = 0; k < node->simdclone->simdlen; k += simdlen)
	    {
	      tree ptr = build_fold_addr_expr (array);
	      int elemsize;
	      if (k)
		{
		  arg = DECL_CHAIN (arg);
		  j++;
		}
	      tree elemtype = TREE_TYPE (TREE_TYPE (arg));
	      elemsize = GET_MODE_SIZE (SCALAR_TYPE_MODE (elemtype));
	      tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
			       build_int_cst (ptype, k * elemsize));
	      t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
	      gimplify_and_add (t, &seq);
	    }
	}
    }
  return seq;
}

/* Callback info for ipa_simd_modify_stmt_ops below.
*/

struct modify_stmt_info {
  ipa_param_body_adjustments *adjustments;
  /* Statement currently being rewritten.  */
  gimple *stmt;
  /* For PHIs: last statement already inserted into the entry block, so
     later insertions for the same PHI keep program order.  */
  gimple *after_stmt;
  /* True if the parent statement was modified by
     ipa_simd_modify_stmt_ops.  */
  bool modified;
};

/* Callback for walk_gimple_op.  Adjust operands from a given statement
   as specified in the adjustments vector in the callback data.  */

static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
  tree *orig_tp = tp;
  /* Look through an outer ADDR_EXPR and component wrappers so the
     PARM_DECL underneath can be matched.  */
  if (TREE_CODE (*tp) == ADDR_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  if (TREE_CODE (*tp) == BIT_FIELD_REF
      || TREE_CODE (*tp) == IMAGPART_EXPR
      || TREE_CODE (*tp) == REALPART_EXPR)
    tp = &TREE_OPERAND (*tp, 0);
  tree repl = NULL_TREE;
  ipa_param_body_replacement *pbr = NULL;
  if (TREE_CODE (*tp) == PARM_DECL)
    {
      pbr = info->adjustments->get_expr_replacement (*tp, true);
      if (pbr)
	repl = pbr->repl;
    }
  else if (TYPE_P (*tp))
    *walk_subtrees = 0;
  if (repl)
    repl = unshare_expr (repl);
  else
    {
      if (tp != orig_tp)
	{
	  /* We stripped a wrapper above but found no direct replacement;
	     recurse into the stripped operand and only continue if that
	     inner walk actually changed something.  */
	  *walk_subtrees = 0;
	  bool modified = info->modified;
	  info->modified = false;
	  walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
	  if (!info->modified)
	    {
	      info->modified = modified;
	      return NULL_TREE;
	    }
	  info->modified = modified;
	  repl = *tp;
	}
      else
	return NULL_TREE;
    }
  if (tp != orig_tp)
    {
      /* The replacement sits under an ADDR_EXPR (or similar); we must
	 materialize its address in a separate statement.  */
      if (gimple_code (info->stmt) == GIMPLE_PHI
	  && pbr
	  && TREE_CODE (*orig_tp) == ADDR_EXPR
	  && TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL
	  && pbr->dummy)
	{
	  /* Reuse the SSA_NAME cached on a previous visit.  */
	  gcc_assert (TREE_CODE (pbr->dummy) == SSA_NAME);
	  *orig_tp = pbr->dummy;
	  info->modified = true;
	  return NULL_TREE;
	}
      repl = build_fold_addr_expr (repl);
      gimple *stmt;
      if (is_gimple_debug (info->stmt))
	{
	  /* For debug statements bind through an artificial DEBUG_EXPR
	     rather than creating real code.  */
	  tree vexpr = make_node (DEBUG_EXPR_DECL);
	  stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
	  DECL_ARTIFICIAL (vexpr) = 1;
	  TREE_TYPE (vexpr) = TREE_TYPE (repl);
	  SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (repl)));
	  repl = vexpr;
	}
      else
	{
	  stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
	  repl = gimple_assign_lhs (stmt);
	}
      gimple_stmt_iterator gsi;
      if (gimple_code (info->stmt) == GIMPLE_PHI)
	{
	  /* Statements feeding a PHI go into the entry successor block,
	     after any previously inserted ones.  */
	  if (info->after_stmt)
	    gsi = gsi_for_stmt (info->after_stmt);
	  else
	    gsi = gsi_after_labels (single_succ
				      (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
	  /* Cache SSA_NAME for next time.  */
	  if (pbr
	      && TREE_CODE (*orig_tp) == ADDR_EXPR
	      && TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL)
	    {
	      gcc_assert (!pbr->dummy);
	      pbr->dummy = repl;
	    }
	}
      else
	gsi = gsi_for_stmt (info->stmt);
      if (info->after_stmt)
	gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      else
	gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      if (gimple_code (info->stmt) == GIMPLE_PHI)
	info->after_stmt = stmt;
      *orig_tp = repl;
    }
  else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
    {
      /* Same storage, different type: view-convert in place.  */
      tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
      *tp = vce;
    }
  else
    *tp = repl;
  info->modified = true;
  return NULL_TREE;
}

/* Traverse the function body and perform all modifications as
   described in ADJUSTMENTS.  At function return, ADJUSTMENTS will be
   modified such that the replacement/reduction value will now be an
   offset into the corresponding simd_array.

   This function will replace all function argument uses with their
   corresponding simd array elements, and adjust the return values
   accordingly.  */

static void
ipa_simd_modify_function_body (struct cgraph_node *node,
			       ipa_param_body_adjustments *adjustments,
			       tree retval_array, tree iter)
{
  basic_block bb;
  unsigned int i, j;

  /* Register replacements for every function argument use to an offset
     into the corresponding simd_array.  */
  for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
    {
      if (!node->simdclone->args[i].vector_arg
	  || (*adjustments->m_adj_params)[j].user_flag)
	continue;
      tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
      tree r = build4 (ARRAY_REF, basetype, node->simdclone->args[i].simd_array,
		       iter, NULL_TREE, NULL_TREE);
      adjustments->register_replacement (&(*adjustments->m_adj_params)[j], r);
      /* Skip the extra adjusted-parameter slots of an argument that was
	 split into multiple vector parameters.  */
      if (simd_clone_subparts (vectype) < node->simdclone->simdlen)
	j += node->simdclone->simdlen / simd_clone_subparts (vectype) - 1;
    }

  /* Rewrite SSA names still rooted at the old PARM_DECLs; default defs
     get an explicit load from the replacement at function entry.  */
  tree name;
  FOR_EACH_SSA_NAME (i, name, cfun)
    {
      tree base_var;
      if (SSA_NAME_VAR (name)
	  && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL
	  && (base_var
	      = adjustments->get_replacement_ssa_base (SSA_NAME_VAR (name))))
	{
	  if (SSA_NAME_IS_DEFAULT_DEF (name))
	    {
	      tree old_decl = SSA_NAME_VAR (name);
	      bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
	      tree repl = adjustments->lookup_replacement (old_decl, 0);
	      gcc_checking_assert (repl);
	      repl = unshare_expr (repl);
	      set_ssa_default_def (cfun, old_decl, NULL_TREE);
	      SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	      SSA_NAME_IS_DEFAULT_DEF (name) = 0;
	      gimple *stmt = gimple_build_assign (name, repl);
	      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
	    }
	  else
	    SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
	}
    }

  struct modify_stmt_info info;
  info.adjustments = adjustments;

  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
    {
      gimple_stmt_iterator gsi;

      /* First rewrite PHI arguments in this block.  */
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = as_a <gphi *> (gsi_stmt (gsi));
	  int i, n = gimple_phi_num_args (phi);
	  info.stmt = phi;
	  info.after_stmt = NULL;
	  struct walk_stmt_info wi;
	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  for (i = 0; i < n; ++i)
	    {
	      int walk_subtrees = 1;
	      tree arg = gimple_phi_arg_def (phi, i);
	      tree op = arg;
	      ipa_simd_modify_stmt_ops (&op, &walk_subtrees, &wi);
	      if (op != arg)
		{
		  SET_PHI_ARG_DEF (phi, i, op);
		  gcc_assert (TREE_CODE (op) == SSA_NAME);
		  if (gimple_phi_arg_edge (phi, i)->flags & EDGE_ABNORMAL)
		    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op) = 1;
		}
	    }
	}

      /* Then rewrite ordinary statements.  */
      gsi = gsi_start_bb (bb);
      while (!gsi_end_p (gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  info.stmt = stmt;
	  info.after_stmt = NULL;
	  struct walk_stmt_info wi;

	  memset (&wi, 0, sizeof (wi));
	  info.modified = false;
	  wi.info = &info;
	  walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);

	  if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
	    {
	      tree retval = gimple_return_retval (return_stmt);
	      edge e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
	      e->flags |= EDGE_FALLTHRU;
	      if (!retval)
		{
		  gsi_remove (&gsi, true);
		  continue;
		}

	      /* Replace `return foo' with `retval_array[iter] = foo'.  */
	      tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
				 retval_array, iter, NULL, NULL);
	      stmt = gimple_build_assign (ref, retval);
	      gsi_replace (&gsi, stmt, true);
	      info.modified = true;
	    }

	  if (info.modified)
	    {
	      update_stmt (stmt);
	      /* If the above changed the var of a debug bind into something
		 different, remove the debug stmt.  We could also for all the
		 replaced parameters add VAR_DECLs for debug info purposes,
		 add debug stmts for those to be the simd array accesses and
		 replace debug stmt var operand with that var.  Debugging of
		 vectorized loops doesn't work too well, so don't bother for
		 now.  */
	      if ((gimple_debug_bind_p (stmt)
		   && !DECL_P (gimple_debug_bind_get_var (stmt)))
		  || (gimple_debug_source_bind_p (stmt)
		      && !DECL_P (gimple_debug_source_bind_get_var (stmt))))
		{
		  gsi_remove (&gsi, true);
		  continue;
		}
	      if (maybe_clean_eh_stmt (stmt))
		gimple_purge_dead_eh_edges (gimple_bb (stmt));
	    }
	  gsi_next (&gsi);
	}
    }
}

/* Helper function of simd_clone_adjust, return linear step addend
   of Ith argument.
*/

static tree
simd_clone_linear_addend (struct cgraph_node *node, unsigned int i,
			  tree addtype, basic_block entry_bb)
{
  tree ptype = NULL_TREE;
  switch (node->simdclone->args[i].arg_type)
    {
    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
      /* Constant step: the addend is simply the step itself.  */
      return build_int_cst (addtype, node->simdclone->args[i].linear_step);
    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
      ptype = TREE_TYPE (node->simdclone->args[i].orig_arg);
      break;
    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
      ptype = TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg));
      break;
    default:
      gcc_unreachable ();
    }

  /* Variable step: linear_step holds the index of the uniform argument
     that carries the step value at run time.  */
  unsigned int idx = node->simdclone->args[i].linear_step;
  tree arg = node->simdclone->args[idx].orig_arg;
  gcc_assert (is_gimple_reg_type (TREE_TYPE (arg)));
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  gimple *g;
  tree ret;
  if (is_gimple_reg (arg))
    ret = get_or_create_ssa_default_def (cfun, arg);
  else
    {
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (arg)), arg);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      ret = gimple_assign_lhs (g);
    }
  /* A reference-typed step argument needs an extra dereference.  */
  if (TREE_CODE (TREE_TYPE (arg)) == REFERENCE_TYPE)
    {
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (TREE_TYPE (arg))),
			       build_simple_mem_ref (ret));
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      ret = gimple_assign_lhs (g);
    }
  if (!useless_type_conversion_p (addtype, TREE_TYPE (ret)))
    {
      g = gimple_build_assign (make_ssa_name (addtype), NOP_EXPR, ret);
      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
      ret = gimple_assign_lhs (g);
    }
  /* For pointers the step is counted in elements; scale it to bytes.  */
  if (POINTER_TYPE_P (ptype))
    {
      tree size = TYPE_SIZE_UNIT (TREE_TYPE (ptype));
      if (size && TREE_CODE (size) == INTEGER_CST)
	{
	  g = gimple_build_assign (make_ssa_name (addtype), MULT_EXPR,
				   ret, fold_convert (addtype, size));
	  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	  ret = gimple_assign_lhs (g);
	}
    }
  return ret;
}

/* Adjust the argument types in NODE to their appropriate vector
   counterparts.  */

static void
simd_clone_adjust (struct cgraph_node *node)
{
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));

  targetm.simd_clone.adjust (node);

  tree retval = simd_clone_adjust_return_type (node);
  ipa_param_body_adjustments *adjustments
    = simd_clone_adjust_argument_types (node);
  gcc_assert (adjustments);

  push_gimplify_context ();

  gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);

  /* Adjust all uses of vector arguments accordingly.  Adjust all
     return values accordingly.  */
  tree iter = create_tmp_var (unsigned_type_node, "iter");
  tree iter1 = make_ssa_name (iter);
  tree iter2 = NULL_TREE;
  ipa_simd_modify_function_body (node, adjustments, retval, iter1);
  delete adjustments;

  /* Initialize the iteration variable.  */
  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  basic_block body_bb = split_block_after_labels (entry_bb)->dest;
  gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
  /* Insert the SIMD array and iv initialization at function entry.  */
  gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);

  pop_gimplify_context (NULL);

  gimple *g;
  basic_block incr_bb = NULL;
  class loop *loop = NULL;

  /* Create a new BB right before the original exit BB, to hold the
     iteration increment and the condition/branch.  */
  if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
    {
      basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
      incr_bb = create_empty_bb (orig_exit);
      incr_bb->count = profile_count::zero ();
      add_bb_to_loop (incr_bb, body_bb->loop_father);
      /* Funnel every edge into the exit block through incr_bb.  */
      while (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
	{
	  edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
	  redirect_edge_succ (e, incr_bb);
	  incr_bb->count += e->count ();
	}
    }
  else if (node->simdclone->inbranch)
    {
      incr_bb = create_empty_bb (entry_bb);
      incr_bb->count = profile_count::zero ();
      add_bb_to_loop (incr_bb, body_bb->loop_father);
    }

  if (incr_bb)
    {
      make_single_succ_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
      gsi = gsi_last_bb (incr_bb);
      iter2 = make_ssa_name (iter);
      g = gimple_build_assign (iter2, PLUS_EXPR, iter1,
			       build_int_cst (unsigned_type_node, 1));
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

      /* Mostly annotate the loop for the vectorizer (the rest is done
	 below).  */
      loop = alloc_loop ();
      cfun->has_force_vectorize_loops = true;
      loop->safelen = node->simdclone->simdlen;
      loop->force_vectorize = true;
      loop->header = body_bb;
    }

  /* Branch around the body if the mask applies.  */
  if (node->simdclone->inbranch)
    {
      gsi = gsi_last_bb (loop->header);
      tree mask_array
	= node->simdclone->args[node->simdclone->nargs - 1].simd_array;
      tree mask;
      if (node->simdclone->mask_mode != VOIDmode)
	{
	  /* Integer bitmask mode: extract the lane's bit by shifting
	     and masking with 1.  */
	  tree shift_cnt;
	  if (mask_array == NULL_TREE)
	    {
	      tree arg
		= node->simdclone->args[node->simdclone->nargs - 1].vector_arg;
	      mask = get_or_create_ssa_default_def (cfun, arg);
	      shift_cnt = iter1;
	    }
	  else
	    {
	      /* The mask was split into several integers; pick the one
		 holding this lane, then shift by the lane's position
		 within it.  */
	      tree maskt = TREE_TYPE (mask_array);
	      int c = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (maskt)));
	      c = node->simdclone->simdlen / (c + 1);
	      int s = exact_log2 (c);
	      gcc_assert (s > 0);
	      c--;
	      tree idx = make_ssa_name (TREE_TYPE (iter1));
	      g = gimple_build_assign (idx, RSHIFT_EXPR, iter1,
				       build_int_cst (NULL_TREE, s));
	      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	      mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
	      tree aref = build4 (ARRAY_REF,
				  TREE_TYPE (TREE_TYPE (mask_array)),
				  mask_array, idx, NULL, NULL);
	      g = gimple_build_assign (mask, aref);
	      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	      shift_cnt = make_ssa_name (TREE_TYPE (iter1));
	      g = gimple_build_assign (shift_cnt, BIT_AND_EXPR, iter1,
				       build_int_cst (TREE_TYPE (iter1), c));
	      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	    }
	  g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
				   RSHIFT_EXPR, mask, shift_cnt);
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	  mask = gimple_assign_lhs (g);
	  g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
				   BIT_AND_EXPR, mask,
				   build_int_cst (TREE_TYPE (mask), 1));
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	  mask = gimple_assign_lhs (g);
	}
      else
	{
	  /* Vector mask: load this lane's element from the mask simd
	     array, view-converting non-integral elements.  */
	  mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
	  tree aref = build4 (ARRAY_REF,
			      TREE_TYPE (TREE_TYPE (mask_array)),
			      mask_array, iter1, NULL, NULL);
	  g = gimple_build_assign (mask, aref);
	  gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	  int bitsize = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (aref)));
	  if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
	    {
	      aref = build1 (VIEW_CONVERT_EXPR,
			     build_nonstandard_integer_type (bitsize, 0),
			     mask);
	      mask = make_ssa_name (TREE_TYPE (aref));
	      g = gimple_build_assign (mask, aref);
	      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
	    }
	}

      /* Skip the body (jump straight to the increment) when the lane's
	 mask bit is zero.  */
      g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
			     NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      edge e = make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
      e->probability = profile_probability::unlikely ().guessed ();
      incr_bb->count += e->count ();
      edge fallthru = FALLTHRU_EDGE (loop->header);
      fallthru->flags = EDGE_FALSE_VALUE;
      fallthru->probability = profile_probability::likely ().guessed ();
    }

  basic_block latch_bb = NULL;
  basic_block new_exit_bb = NULL;

  /* Generate the condition.  */
  if (incr_bb)
    {
      gsi = gsi_last_bb (incr_bb);
      g = gimple_build_cond (LT_EXPR, iter2,
			     build_int_cst (unsigned_type_node,
					    node->simdclone->simdlen),
			     NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      edge e = split_block (incr_bb, gsi_stmt (gsi));
      latch_bb = e->dest;
      new_exit_bb = split_block_after_labels (latch_bb)->dest;
      loop->latch = latch_bb;

      redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);

      edge new_e = make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);

      /* FIXME: Do we need to distribute probabilities for the conditional? */
      new_e->probability = profile_probability::guessed_never ();
      /* The successor of incr_bb is already pointing to latch_bb; just
	 change the flags.
	 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE);  */
      FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
    }

  gphi *phi = create_phi_node (iter1, body_bb);
  edge preheader_edge = find_edge (entry_bb, body_bb);
  edge latch_edge = NULL;
  add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
	       UNKNOWN_LOCATION);
  if (incr_bb)
    {
      latch_edge = single_succ_edge (latch_bb);
      add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);

      /* Generate the new return.  */
      gsi = gsi_last_bb (new_exit_bb);
      if (retval
	  && TREE_CODE (retval) == VIEW_CONVERT_EXPR
	  && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
	retval = TREE_OPERAND (retval, 0);
      else if (retval)
	{
	  retval = build1 (VIEW_CONVERT_EXPR,
			   TREE_TYPE (TREE_TYPE (node->decl)),
			   retval);
	  retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
					     false, GSI_CONTINUE_LINKING);
	}
      g = gimple_build_return (retval);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
    }

  /* Handle aligned clauses by replacing default defs of the aligned
     uniform args with __builtin_assume_aligned (arg_N(D), alignment)
     lhs.  Handle linear by adding PHIs.  */
  for (unsigned i = 0; i < node->simdclone->nargs; i++)
    if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
	&& (TREE_ADDRESSABLE (node->simdclone->args[i].orig_arg)
	    || !is_gimple_reg_type
			(TREE_TYPE (node->simdclone->args[i].orig_arg))))
      {
	tree orig_arg = node->simdclone->args[i].orig_arg;
	if (is_gimple_reg_type (TREE_TYPE (orig_arg)))
	  iter1 = make_ssa_name (TREE_TYPE (orig_arg));
	else
	  {
	    iter1 = create_tmp_var_raw (TREE_TYPE (orig_arg));
	    gimple_add_tmp_var (iter1);
	  }
	gsi = gsi_after_labels (entry_bb);
	g = gimple_build_assign (iter1, orig_arg);
	gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	gsi = gsi_after_labels (body_bb);
	g = gimple_build_assign (orig_arg, iter1);
	gsi_insert_before (&gsi, g, GSI_NEW_STMT);
      }
    else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
	     && DECL_BY_REFERENCE (node->simdclone->args[i].orig_arg)
	     && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
		== REFERENCE_TYPE
	     && TREE_ADDRESSABLE
		  (TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg))))
      {
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	if (def && !has_zero_uses (def))
	  {
	    iter1 = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (orig_arg)));
	    gimple_add_tmp_var (iter1);
	    gsi = gsi_after_labels (entry_bb);
	    g = gimple_build_assign (iter1, build_simple_mem_ref (def));
	    gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	    gsi = gsi_after_labels (body_bb);
	    g = gimple_build_assign (build_simple_mem_ref (def), iter1);
	    gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	  }
      }
    else if (node->simdclone->args[i].alignment
	     && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
	     && (node->simdclone->args[i].alignment
		 & (node->simdclone->args[i].alignment - 1)) == 0
	     && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
		== POINTER_TYPE)
      {
	unsigned int alignment = node->simdclone->args[i].alignment;
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	if (def && !has_zero_uses (def))
	  {
	    tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
	    gimple_seq seq = NULL;
	    bool need_cvt = false;
	    gcall *call
	      = gimple_build_call (fn, 2, def, size_int (alignment));
	    g = call;
	    if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
					    ptr_type_node))
	      need_cvt = true;
	    tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg);
	    gimple_call_set_lhs (g, t);
	    gimple_seq_add_stmt_without_update (&seq, g);
	    if (need_cvt)
	      {
		t = make_ssa_name (orig_arg);
		g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g));
		gimple_seq_add_stmt_without_update (&seq, g);
	      }
	    gsi_insert_seq_on_edge_immediate
	      (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);

	    entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
	    node->create_edge (cgraph_node::get_create (fn),
			       call, entry_bb->count);

	    /* Redirect every non-debug use of the old default def to the
	       result of __builtin_assume_aligned.  */
	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple *use_stmt;
	    tree repl = gimple_get_lhs (g);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (is_gimple_debug (use_stmt) || use_stmt == call)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, repl);
	  }
      }
    else if ((node->simdclone->args[i].arg_type
	      == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	     || (node->simdclone->args[i].arg_type
		 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)
	     || (node->simdclone->args[i].arg_type
		 == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
	     || (node->simdclone->args[i].arg_type
		 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP))
      {
	tree orig_arg = node->simdclone->args[i].orig_arg;
	gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
		    || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
	tree def = NULL_TREE;
	if (TREE_ADDRESSABLE (orig_arg))
	  {
	    def = make_ssa_name (TREE_TYPE (orig_arg));
	    iter1 = make_ssa_name (TREE_TYPE (orig_arg));
	    if (incr_bb)
	      iter2 = make_ssa_name (TREE_TYPE (orig_arg));
	    gsi = gsi_after_labels (entry_bb);
	    g = gimple_build_assign (def, orig_arg);
	    gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	  }
	else
	  {
	    def = ssa_default_def (cfun, orig_arg);
	    if (!def || has_zero_uses (def))
	      def = NULL_TREE;
	    else
	      {
		iter1 = make_ssa_name (orig_arg);
		if (incr_bb)
		  iter2 = make_ssa_name (orig_arg);
	      }
	  }
	if (def)
	  {
	    /* PHI iterating the linear value: initial value on entry,
	       incremented by the step on each iteration.  */
	    phi = create_phi_node (iter1, body_bb);
	    add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
	    if (incr_bb)
	      {
		add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
		enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
				      ? PLUS_EXPR : POINTER_PLUS_EXPR;
		tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
			       ? TREE_TYPE (orig_arg) : sizetype;
		tree addcst = simd_clone_linear_addend (node, i, addtype,
							entry_bb);
		gsi = gsi_last_bb (incr_bb);
		g = gimple_build_assign (iter2, code, iter1, addcst);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	      }

	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple *use_stmt;
	    if (TREE_ADDRESSABLE (orig_arg))
	      {
		gsi = gsi_after_labels (body_bb);
		g = gimple_build_assign (orig_arg, iter1);
		gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	      }
	    else
	      FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
		if (use_stmt == phi)
		  continue;
		else
		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		    SET_USE (use_p, iter1);
	  }
      }
    else if (node->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
	     || (node->simdclone->args[i].arg_type
		 == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP))
      {
	tree orig_arg = node->simdclone->args[i].orig_arg;
	tree def = ssa_default_def (cfun, orig_arg);
	gcc_assert (!TREE_ADDRESSABLE (orig_arg)
		    && TREE_CODE (TREE_TYPE (orig_arg)) == REFERENCE_TYPE);
	if (def && !has_zero_uses (def))
	  {
	    /* Linear uval: iterate both a pointer into the simd array
	       (iter1/iter2) and the referenced value (iter4/iter5).  */
	    tree rtype = TREE_TYPE (TREE_TYPE (orig_arg));
	    iter1 = make_ssa_name (orig_arg);
	    if (incr_bb)
	      iter2 = make_ssa_name (orig_arg);
	    tree iter3 = make_ssa_name (rtype);
	    tree iter4 = make_ssa_name (rtype);
	    tree iter5 = incr_bb ? make_ssa_name (rtype) : NULL_TREE;
	    gsi = gsi_after_labels (entry_bb);
	    gimple *load
	      = gimple_build_assign (iter3, build_simple_mem_ref (def));
	    gsi_insert_before (&gsi, load, GSI_NEW_STMT);
	    tree array = node->simdclone->args[i].simd_array;
	    TREE_ADDRESSABLE (array) = 1;
	    tree ptr = build_fold_addr_expr (array);
	    phi = create_phi_node (iter1, body_bb);
	    add_phi_arg (phi, ptr, preheader_edge, UNKNOWN_LOCATION);
	    if (incr_bb)
	      {
		add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
		g = gimple_build_assign (iter2, POINTER_PLUS_EXPR, iter1,
					 TYPE_SIZE_UNIT (TREE_TYPE (iter3)));
		gsi = gsi_last_bb (incr_bb);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	      }
	    phi = create_phi_node (iter4, body_bb);
	    add_phi_arg (phi, iter3, preheader_edge, UNKNOWN_LOCATION);
	    if (incr_bb)
	      {
		add_phi_arg (phi, iter5, latch_edge, UNKNOWN_LOCATION);
		enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
				      ? PLUS_EXPR : POINTER_PLUS_EXPR;
		tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
			       ? TREE_TYPE (iter3) : sizetype;
		tree addcst = simd_clone_linear_addend (node, i, addtype,
							entry_bb);
		g = gimple_build_assign (iter5, code, iter4, addcst);
		gsi = gsi_last_bb (incr_bb);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	      }
	    g = gimple_build_assign (build_simple_mem_ref (iter1), iter4);
	    gsi = gsi_after_labels (body_bb);
	    gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	    imm_use_iterator iter;
	    use_operand_p use_p;
	    gimple *use_stmt;
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
	      if (use_stmt == load)
		continue;
	      else
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, iter1);
	    /* Store the final value back through the reference unless it
	       is read-only or there is no loop.  */
	    if (!TYPE_READONLY (rtype) && incr_bb)
	      {
		tree v = make_ssa_name (rtype);
		tree aref = build4 (ARRAY_REF, rtype, array,
				    size_zero_node, NULL_TREE, NULL_TREE);
		gsi = gsi_after_labels (new_exit_bb);
		g = gimple_build_assign (v, aref);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
		g = gimple_build_assign (build_simple_mem_ref (def), v);
		gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	      }
	  }
      }

  calculate_dominance_info (CDI_DOMINATORS);
  if (loop)
    add_loop (loop, loop->header->loop_father);
  update_ssa (TODO_update_ssa);

  pop_cfun ();
}

/* If the function in NODE is tagged as an elemental SIMD function,
   create the appropriate SIMD clones.  */

void
expand_simd_clones (struct cgraph_node *node)
{
  tree attr = lookup_attribute ("omp declare simd",
				DECL_ATTRIBUTES (node->decl));
  if (attr == NULL_TREE
      || node->inlined_to
      || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
    return;

  /* Ignore
     #pragma omp declare simd
     extern int foo ();
     in C, there we don't know the argument types at all.  */
  if (!node->definition
      && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
    return;

  /* Call this before creating clone_info, as it might ggc_collect.  */
  if (node->definition && node->has_gimple_body_p ())
    node->get_body ();

  do
    {
      /* Start with parsing the "omp declare simd" attribute(s).  */
      bool inbranch_clause_specified;
      struct cgraph_simd_clone *clone_info
	= simd_clone_clauses_extract (node, TREE_VALUE (attr),
				      &inbranch_clause_specified);
      if (clone_info == NULL)
	continue;

      int orig_simdlen = clone_info->simdlen;
      tree base_type = simd_clone_compute_base_data_type (node, clone_info);
      /* The target can return 0 (no simd clones should be created),
	 1 (just one ISA of simd clones should be created) or higher
	 count of ISA variants.  In that case, clone_info is initialized
	 for the first ISA variant.  */
      int count
	= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
							  base_type, 0);
      if (count == 0)
	continue;

      /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
	 also create one inbranch and one !inbranch clone of it.  */
      for (int i = 0; i < count * 2; i++)
	{
	  struct cgraph_simd_clone *clone = clone_info;
	  if (inbranch_clause_specified && (i & 1) != 0)
	    continue;

	  if (i != 0)
	    {
	      clone = simd_clone_struct_alloc (clone_info->nargs
					       + ((i & 1) != 0));
	      simd_clone_struct_copy (clone, clone_info);
	      /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
		 and simd_clone_adjust_argument_types did to the first
		 clone's info.  */
	      clone->nargs -= clone_info->inbranch;
	      clone->simdlen = orig_simdlen;
	      /* And call the target hook again to get the right ISA.  */
	      targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
							      base_type,
							      i / 2);
	      if ((i & 1) != 0)
		clone->inbranch = 1;
	    }

	  /* simd_clone_mangle might fail if such a clone has been created
	     already.  */
	  tree id = simd_clone_mangle (node, clone);
	  if (id == NULL_TREE)
	    {
	      if (i == 0)
		clone->nargs += clone->inbranch;
	      continue;
	    }

	  /* Only when we are sure we want to create the clone actually
	     clone the function (or definitions) or create another
	     extern FUNCTION_DECL (for prototypes without definitions).  */
	  struct cgraph_node *n = simd_clone_create (node);
	  if (n == NULL)
	    {
	      if (i == 0)
		clone->nargs += clone->inbranch;
	      continue;
	    }

	  n->simdclone = clone;
	  clone->origin = node;
	  clone->next_clone = NULL;
	  /* Link the clone into the origin's circular prev/next list.  */
	  if (node->simd_clones == NULL)
	    {
	      clone->prev_clone = n;
	      node->simd_clones = n;
	    }
	  else
	    {
	      clone->prev_clone = node->simd_clones->simdclone->prev_clone;
	      clone->prev_clone->simdclone->next_clone = n;
	      node->simd_clones->simdclone->prev_clone = n;
	    }
	  symtab->change_decl_assembler_name (n->decl, id);
	  /* And finally adjust the return type, parameters and for
	     definitions also function body.  */
	  if (node->definition)
	    simd_clone_adjust (n);
	  else
	    {
	      TREE_TYPE (n->decl)
		= build_distinct_type_copy (TREE_TYPE (n->decl));
	      targetm.simd_clone.adjust (n);
	      simd_clone_adjust_return_type (n);
	      simd_clone_adjust_argument_types (n);
	    }
	}
    }
  while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}

/* Entry point for IPA simd clone creation pass.
*/

/* Entry point of the IPA pass: expand SIMD clones for every function
   in the compilation unit.  */

static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    expand_simd_clones (node);
  return 0;
}

namespace {

const pass_data pass_data_omp_simd_clone =
{
  SIMPLE_IPA_PASS,		/* type */
  "simdclone",			/* name */
  OPTGROUP_OMP,			/* optinfo_flags */
  TV_NONE,			/* tv_id */
  ( PROP_ssa | PROP_cfg ),	/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0,				/* todo_flags_finish */
};

class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
  pass_omp_simd_clone(gcc::context *ctxt)
    : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};

/* Run only when the target provides the hook that computes vector
   sizes and simdlen for clones.  */

bool
pass_omp_simd_clone::gate (function *)
{
  return targetm.simd_clone.compute_vecsize_and_simdlen != NULL;
}

} // anon namespace

/* Factory function used by the pass manager.  */

simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}
atomic-1.c
/* { dg-do compile } */
/* { dg-additional-options "-Wno-volatile" { target c++ } } */

/* Compile-only test: exercise every update form accepted by
   #pragma omp atomic on a plain int, a volatile int and a volatile
   unsigned char.
   NOTE(review): `/= 3` appears twice in each function — presumably
   deliberate duplication in the original testcase; confirm before
   changing.  */

int x;
volatile int y;
volatile unsigned char z;

/* Atomic updates of a plain int.  */
void f1(void)
{
  #pragma omp atomic
  x++;
  #pragma omp atomic
  x--;
  #pragma omp atomic
  ++x;
  #pragma omp atomic
  --x;
  #pragma omp atomic
  x += 1;
  #pragma omp atomic
  x -= y;
  #pragma omp atomic
  x |= 1;
  #pragma omp atomic
  x &= 1;
  #pragma omp atomic
  x ^= 1;
  #pragma omp atomic
  x *= 3;
  #pragma omp atomic
  x /= 3;
  #pragma omp atomic
  x /= 3;
  #pragma omp atomic
  x <<= 3;
  #pragma omp atomic
  x >>= 3;
}

/* Same forms on a volatile int.  */
void f2(void)
{
  #pragma omp atomic
  y++;
  #pragma omp atomic
  y--;
  #pragma omp atomic
  ++y;
  #pragma omp atomic
  --y;
  #pragma omp atomic
  y += 1;
  #pragma omp atomic
  y -= x;
  #pragma omp atomic
  y |= 1;
  #pragma omp atomic
  y &= 1;
  #pragma omp atomic
  y ^= 1;
  #pragma omp atomic
  y *= 3;
  #pragma omp atomic
  y /= 3;
  #pragma omp atomic
  y /= 3;
  #pragma omp atomic
  y <<= 3;
  #pragma omp atomic
  y >>= 3;
}

/* Same forms on a volatile unsigned char (no -= variant here).  */
void f3(void)
{
  #pragma omp atomic
  z++;
  #pragma omp atomic
  z--;
  #pragma omp atomic
  ++z;
  #pragma omp atomic
  --z;
  #pragma omp atomic
  z += 1;
  #pragma omp atomic
  z |= 1;
  #pragma omp atomic
  z &= 1;
  #pragma omp atomic
  z ^= 1;
  #pragma omp atomic
  z *= 3;
  #pragma omp atomic
  z /= 3;
  #pragma omp atomic
  z /= 3;
  #pragma omp atomic
  z <<= 3;
  #pragma omp atomic
  z >>= 3;
}
deconvolution_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Deconvolution (transposed convolution) for pack-4 layout using MIPS MSA
// intrinsics: each "element" is 4 floats (the *4 strides below), and each
// kernel tap stores a 4x4 weight tile (16 floats, see the *16 offsets).
// For every output position the gather form of deconvolution is used:
// an input position contributes only when the output coordinate maps back
// onto the input grid exactly (the stride divisibility checks).
static void deconvolution_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel footprint after dilation.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Start from zero or the 4-wide bias for this output channel.
                v4f32 _sum = (v4f32)__msa_fill_w(0);

                if (bias_data_ptr)
                {
                    _sum = (v4f32)__msa_ld_w((const float*)bias_data_ptr + p * 4, 0);
                }

                const float* kptr = (const float*)weight_data_pack4.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Map output row back onto the input grid; skip taps
                        // that fall off the grid or between input rows.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same mapping for the column coordinate.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const float* sptr = m.row(sy) + sx * 4;

                            // Offset of this tap's 4x4 weight tile.
                            int k = (y * kernel_w + x) * 16;

                            // Broadcast each of the 4 input lanes and
                            // accumulate against its weight row (4x4 MAC).
                            v4f32 _val0 = (v4f32)__msa_fill_w_f32(*sptr++);
                            v4f32 _val1 = (v4f32)__msa_fill_w_f32(*sptr++);
                            v4f32 _val2 = (v4f32)__msa_fill_w_f32(*sptr++);
                            v4f32 _val3 = (v4f32)__msa_fill_w_f32(*sptr++);

                            v4f32 _w0 = (v4f32)__msa_ld_w(kptr + k, 0);
                            v4f32 _w1 = (v4f32)__msa_ld_w(kptr + k + 4, 0);
                            v4f32 _w2 = (v4f32)__msa_ld_w(kptr + k + 8, 0);
                            v4f32 _w3 = (v4f32)__msa_ld_w(kptr + k + 12, 0);

                            _sum = __msa_fmadd_w(_sum, _val0, _w0);
                            _sum = __msa_fmadd_w(_sum, _val1, _w1);
                            _sum = __msa_fmadd_w(_sum, _val2, _w2);
                            _sum = __msa_fmadd_w(_sum, _val3, _w3);
                        }
                    }

                    // Advance to the next input channel's weight block.
                    kptr += maxk * 16;
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                __msa_st_w((v4i32)_sum, outptr + j * 4, 0);
            }

            outptr += outw * 4;
        }
    }
}
DRB031-truedepfirstdimension-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* There is a loop-carried true dependence within the outer level loop.
   Data race pair: b[i][j]@66:7 vs. b[i-1][j-1]@66:15 */

/* NOTE(review): this is a DataRaceBench test case — the race in the second
   target region is INTENTIONAL (the file name ends in "-yes", meaning a race
   is expected). Do not "fix" it; tools are graded on detecting it.  */

#include <stdlib.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
  int i,j;
  int n=1000, m=1000;
  double b[1000][1000];

  /* Initialize b on the device; the parallel init itself is race-free
     because each (i, j) element is written exactly once.  */
  #pragma omp target data map(from:b[0:1000][0:1000])
  {
    #pragma omp target
    {
      #pragma omp parallel for private(j)
      for (i=0; i<n; i++)
        for (j=0; j<m; j++)
          b[i][j] = 0.5;
    }
  }

  /* Racy kernel: iteration (i, j) reads b[i-1][j-1], which iteration
     (i-1, j-1) of the same parallel region's enclosing loop nest writes —
     a loop-carried true dependence on the (sequential) outer loop combined
     with parallel inner iterations.  */
  #pragma omp target data map(tofrom:b[0:1000][0:1000])
  {
    for (i=1;i<n;i++)
      #pragma omp target parallel for
      for (j=1;j<m;j++)
        b[i][j]=b[i-1][j-1];
  }

  /* Print the result so the computation cannot be optimized away.  */
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
      printf("b[%d][%d]=%f\n", i, j, b[i][j]);

  return 0;
}
matvec.c
/* * OpenMP implementation of matrix-vector multiplication (not optimized). * To be used with the in-class demo in model [A2]: Task Mapping on Soft Heterogeneous Systems * * Apan Qasem <apan@txtstate.edu> * last updated: 03/09/2021 */ #include<stdio.h> #include<stdlib.h> #include<sys/time.h> #include<omp.h> #define VAL_RANGE 1023 /* timer function */ double get_time_in_seconds() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } double dot_prod(double *x, double *y, int n) { double sum = 0.0; int i; #pragma omp parallel for reduction(+:sum) for (i = 0; i < n; i++) sum += x[i] * y[i]; return sum; } void matrix_vector_mult(double **mat, double *vec, double *result, long long rows, long long cols) { /* not parallelelized to ensure runtimes are more meaningful */ int i; for (i = 0; i < rows; i++) result[i] = dot_prod(mat[i], vec, cols); } void display_matrix(const double **matrix, long long N) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) printf("%3.4f ", matrix[i][j]); printf("\n"); } } int main(int argc, char *argv[]) { if (argc < 4) { printf("usage: \n"); printf(" ./matvec N n t\n"); printf(" N = matrix dimension\n"); printf(" n = number of reps\n"); printf(" t = number of threads\n"); exit(0); } /* matrix dimenstion, assume N x N matrix and vector of size of N*/ long long N = atoi(argv[1]); /* number of reps; control running time of program */ unsigned n = atoi(argv[2]); /* number of OpenMP threads */ unsigned threads = atoi(argv[3]); omp_set_num_threads(threads); double **matrix; double *vec; double *result; int i, j; double start_time, end_time; /* memory allocation and initilization */ #ifdef INIT_TIMER start_time = get_time_in_seconds(); #endif matrix = (double **) malloc(sizeof(double *) * N); for (i = 0; i < N; i++) matrix[i] = (double *) malloc(sizeof(double) * N); vec = (double *) malloc(sizeof(double) * N); result = (double *) malloc(sizeof(double) * 
N); for (i = 0; i < N; i++) for (j = 0; j < N; j++) matrix[i][j] = rand() / (double) (RAND_MAX/VAL_RANGE); for (i = 0; i < N; i++) vec[i] = rand() / (double) (RAND_MAX/VAL_RANGE); #ifdef INIT_TIMER end_time = get_time_in_seconds(); fprintf(stdout, "Initialization time = %.3f s\n", end_time - start_time); #endif /* computation */ start_time = get_time_in_seconds(); for (i = 0; i < n; i++) matrix_vector_mult(matrix, vec, result, N, N); end_time = get_time_in_seconds(); /* verification (by inspection only) */ fprintf(stdout, "Verification: "); for (unsigned i = 0; i < 1; i++) fprintf(stdout, "result[%d] = %3.2e\n", i, result[i]); fprintf(stdout, "\n\033[0;33mCompute time = %.3f s\n\033[0m", end_time - start_time); return 0; }
map.c
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <cinttypes>
#include <algorithm>
#include <tuple>
#include <omp.h>
#include <iostream>
#include "kthread.h"
#include "kvec.h"
#include "kalloc.h"
#include "sdust.h"
#include "mmpriv.h"
#include "bseq.h"
#include "khash.h"

// Per-thread mapping buffer: its own kalloc arena plus bookkeeping carried
// between stages of a fragment's mapping.
struct mm_tbuf_s {
	void *km;              // thread-local kalloc memory pool (NULL in debug mode 1)
	int rep_len, frag_gap; // repetitive-seed length estimate; chaining gap used
};

// Allocate a thread buffer; skips the kalloc arena when debug flag bit 0 is
// set (so allocations go through libc and are visible to leak checkers).
mm_tbuf_t *mm_tbuf_init(void)
{
	mm_tbuf_t *b;
	b = (mm_tbuf_t*)calloc(1, sizeof(mm_tbuf_t));
	if (!(mm_dbg_flag & 1)) b->km = km_init();
	return b;
}

// Destroy a thread buffer and its arena; safe to call with NULL.
void mm_tbuf_destroy(mm_tbuf_t *b)
{
	if (b == 0) return;
	km_destroy(b->km);
	free(b);
}

// Accessor for the buffer's kalloc arena.
void *mm_tbuf_get_km(mm_tbuf_t *b)
{
	return b->km;
}

// Drop minimizers that overlap low-complexity regions (SDUST) by more than
// half their span. Compacts `a` in place and returns the new count.
static int mm_dust_minier(void *km, int n, mm128_t *a, int l_seq, const char *seq, int sdust_thres)
{
	int n_dreg, j, k, u = 0;
	const uint64_t *dreg;
	sdust_buf_t *sdb;
	if (sdust_thres <= 0) return n;
	sdb = sdust_buf_init(km);
	// dreg[]: packed intervals, start in the high 32 bits, end in the low 32.
	dreg = sdust_core((const uint8_t*)seq, l_seq, sdust_thres, 64, &n_dreg, sdb);
	for (j = k = 0; j < n; ++j) { // squeeze out minimizers that significantly overlap with LCRs
		int32_t qpos = (uint32_t)a[j].y>>1, span = a[j].x&0xff;
		int32_t s = qpos - (span - 1), e = s + span;
		while (u < n_dreg && (int32_t)dreg[u] <= s) ++u;
		if (u < n_dreg && (int32_t)(dreg[u]>>32) < e) {
			int v, l = 0; // l: total masked overlap with this minimizer
			for (v = u; v < n_dreg && (int32_t)(dreg[v]>>32) < e; ++v) { // iterate over LCRs overlapping this minimizer
				int ss = s > (int32_t)(dreg[v]>>32)? s : dreg[v]>>32;
				int ee = e < (int32_t)dreg[v]? e : (uint32_t)dreg[v];
				l += ee - ss;
			}
			if (l <= span>>1) a[k++] = a[j]; // keep the minimizer if less than half of it falls in masked region
		} else a[k++] = a[j];
	}
	sdust_buf_destroy(sdb);
	return k; // the new size
}

// Sketch all query segments into one minimizer vector `mv`, offsetting each
// segment's query positions by the running base count, then optionally apply
// the SDUST low-complexity filter.
static void collect_minimizers(void *km, const mm_mapopt_t *opt, const mm_idx_t *mi, int n_segs, const int *qlens, const char* const* seqs, mm128_v *mv)
{
	int i, n, sum = 0;
	mv->n = 0;
	for (i = n = 0; i < n_segs; ++i) {
		size_t j;
		// NOTE(review): this mm_sketch takes extra index arguments
		// (mi, ds, s, pos1..pos4) relative to stock minimap2 — a local
		// modification; confirm against this project's mm_sketch signature.
		mm_sketch(km, seqs[i], qlens[i], mi->w, mi->k, i, mi->flag&MM_I_HPC, mv, mi,mi->ds,mi->s,mi->pos1,mi->pos2,mi->pos3,mi->pos4);
		for (j = n; j < mv->n; ++j)
			mv->a[j].y += sum << 1;
		if (opt->sdust_thres > 0) // mask low-complexity minimizers
			mv->n = n + mm_dust_minier(km, mv->n - n, mv->a + n, qlens[i], seqs[i], opt->sdust_thres);
		sum += qlens[i], n = mv->n;
	}
}

#include "ksort.h"
// Min-heap on mm128_t keyed by .x (reference position); used by the heap
// merge in collect_seed_hits_heap().
#define heap_lt(a, b) ((a).x > (b).x)
KSORT_INIT(heap, mm128_t, heap_lt)

// One minimizer's index hits: occurrence list `cr` of length `n`, query
// position/span, owning segment, and whether it repeats adjacently (tandem).
typedef struct {
	uint32_t n;
	uint32_t q_pos, q_span;
	uint32_t seg_id:31, is_tandem:1;
	const uint64_t *cr;
} mm_match_t;

// Look up each minimizer in the index. Minimizers whose occurrence count is
// >= max_occ are not matched; instead their query coverage is merged into
// *rep_len (repetitive-length estimate). Also records per-minimizer
// (span<<32|pos) into *mini_pos and sums total hits into *n_a.
static mm_match_t *collect_matches(void *km, int *_n_m, int max_occ, const mm_idx_t *mi, const mm128_v *mv, int64_t *n_a, int *rep_len, int *n_mini_pos, uint64_t **mini_pos)
{
	int rep_st = 0, rep_en = 0, n_m;
	size_t i;
	mm_match_t *m;
	*n_mini_pos = 0;
	*mini_pos = (uint64_t*)kmalloc(km, mv->n * sizeof(uint64_t));
	m = (mm_match_t*)kmalloc(km, mv->n * sizeof(mm_match_t));
	for (i = 0, n_m = 0, *rep_len = 0, *n_a = 0; i < mv->n; ++i) {
		const uint64_t *cr;
		mm128_t *p = &mv->a[i];
		uint32_t q_pos = (uint32_t)p->y, q_span = p->x & 0xff;
		int t;
		cr = mm_idx_get(mi, p->x>>8, &t);
		if (t >= max_occ) {
			// too repetitive: extend/flush the current repeat interval
			int en = (q_pos >> 1) + 1, st = en - q_span;
			if (st > rep_en) {
				*rep_len += rep_en - rep_st;
				rep_st = st, rep_en = en;
			} else rep_en = en;
		} else {
			mm_match_t *q = &m[n_m++];
			q->q_pos = q_pos, q->q_span = q_span, q->cr = cr, q->n = t, q->seg_id = p->y >> 32;
			// tandem: same k-mer as an adjacent minimizer in the sorted vector
			q->is_tandem = 0;
			if (i > 0 && p->x>>8 == mv->a[i - 1].x>>8) q->is_tandem = 1;
			if (i < mv->n - 1 && p->x>>8 == mv->a[i + 1].x>>8) q->is_tandem = 1;
			*n_a += q->n;
			(*mini_pos)[(*n_mini_pos)++] = (uint64_t)q_span<<32 | q_pos>>1;
		}
	}
	*rep_len += rep_en - rep_st;
	*_n_m = n_m;
	return m;
}

// Decide whether an index hit `r` for match `q` should be discarded, per the
// self/dual/strand-filter flags. Sets *is_self when the hit lands on the
// query's own diagonal-strand (used later to damp spurious self extension).
static inline int skip_seed(int flag, uint64_t r, const mm_match_t *q, const char *qname, int qlen, const mm_idx_t *mi, int *is_self)
{
	*is_self = 0;
	if (qname && (flag & (MM_F_NO_DIAG|MM_F_NO_DUAL))) {
		const mm_idx_seq_t *s = &mi->seq[r>>32];
		int cmp;
		cmp = strcmp(qname, s->name);
		if ((flag&MM_F_NO_DIAG) && cmp == 0 && (int)s->len == qlen) {
			if ((uint32_t)r>>1 == (q->q_pos>>1)) return 1; // avoid the diagonal anchors
			if ((r&1) == (q->q_pos&1)) *is_self = 1; // this flag is used to avoid spurious extension on self chain
		}
		if ((flag&MM_F_NO_DUAL) && cmp > 0) // all-vs-all mode: map once
			return 1;
	}
	if (flag & (MM_F_FOR_ONLY|MM_F_REV_ONLY)) {
		if ((r&1) == (q->q_pos&1)) { // forward strand
			if (flag & MM_F_REV_ONLY) return 1;
		} else {
			if (flag & MM_F_FOR_ONLY) return 1;
		}
	}
	return 0;
}

// Build the anchor array `a` by k-way heap merge of all matches' occurrence
// lists, so anchors come out sorted by reference position without a final
// sort. Forward-strand anchors fill from the front, reverse from the back;
// the reverse block is then reversed and compacted. Caller kfree()s `a`.
static mm128_t *collect_seed_hits_heap(void *km, const mm_mapopt_t *opt, int max_occ, const mm_idx_t *mi, const char *qname, const mm128_v *mv, int qlen, int64_t *n_a, int *rep_len, int *n_mini_pos, uint64_t **mini_pos)
{
	int i, n_m, heap_size = 0;
	int64_t j, n_for = 0, n_rev = 0;
	mm_match_t *m;
	mm128_t *a, *heap;

	m = collect_matches(km, &n_m, max_occ, mi, mv, n_a, rep_len, n_mini_pos, mini_pos);

	heap = (mm128_t*)kmalloc(km, n_m * sizeof(mm128_t));
	a = (mm128_t*)kmalloc(km, *n_a * sizeof(mm128_t));

	// seed the heap with each match's first occurrence; .y encodes
	// (match index << 32 | position within its cr[] list)
	for (i = 0, heap_size = 0; i < n_m; ++i) {
		if (m[i].n > 0) {
			heap[heap_size].x = m[i].cr[0];
			heap[heap_size].y = (uint64_t)i<<32;
			++heap_size;
		}
	}
	ks_heapmake_heap(heap_size, heap);
	while (heap_size > 0) {
		mm_match_t *q = &m[heap->y>>32];
		mm128_t *p;
		uint64_t r = heap->x;
		int32_t is_self, rpos = (uint32_t)r >> 1;
		if (!skip_seed(opt->flag, r, q, qname, qlen, mi, &is_self)) {
			if ((r&1) == (q->q_pos&1)) { // forward strand
				p = &a[n_for++];
				p->x = (r&0xffffffff00000000ULL) | rpos;
				p->y = (uint64_t)q->q_span << 32 | q->q_pos >> 1;
			} else { // reverse strand
				p = &a[(*n_a) - (++n_rev)];
				p->x = 1ULL<<63 | (r&0xffffffff00000000ULL) | rpos;
				p->y = (uint64_t)q->q_span << 32 | (qlen - ((q->q_pos>>1) + 1 - q->q_span) - 1);
			}
			p->y |= (uint64_t)q->seg_id << MM_SEED_SEG_SHIFT;
			if (q->is_tandem) p->y |= MM_SEED_TANDEM;
			if (is_self) p->y |= MM_SEED_SELF;
		}
		// update the heap
		if ((uint32_t)heap->y < q->n - 1) {
			++heap[0].y;
			heap[0].x = m[heap[0].y>>32].cr[(uint32_t)heap[0].y];
		} else {
			heap[0] = heap[heap_size - 1];
			--heap_size;
		}
		ks_heapdown_heap(0, heap_size, heap);
	}
	kfree(km, m);
	kfree(km, heap);
	// reverse anchors on the reverse strand, as they are in the descending order
	for (j = 0; j < n_rev>>1; ++j) {
		mm128_t t = a[(*n_a) - 1 - j];
		a[(*n_a) - 1 - j] = a[(*n_a) - (n_rev - j)];
		a[(*n_a) - (n_rev - j)] = t;
	}
	// close the gap left by skipped seeds between the two strand blocks
	if (*n_a > n_for + n_rev) {
		memmove(a + n_for, a + (*n_a) - n_rev, n_rev * sizeof(mm128_t));
		*n_a = n_for + n_rev;
	}
	return a;
}

// Simpler (non-heap) anchor collection: enumerate all occurrences of all
// matches, then radix-sort the anchors by .x. Caller kfree()s the result.
static mm128_t *collect_seed_hits(void *km, const mm_mapopt_t *opt, int max_occ, const mm_idx_t *mi, const char *qname, const mm128_v *mv, int qlen, int64_t *n_a, int *rep_len, int *n_mini_pos, uint64_t **mini_pos)
{
	int i, n_m;
	mm_match_t *m;
	mm128_t *a;
	m = collect_matches(km, &n_m, max_occ, mi, mv, n_a, rep_len, n_mini_pos, mini_pos);
	a = (mm128_t*)kmalloc(km, *n_a * sizeof(mm128_t));
	for (i = 0, *n_a = 0; i < n_m; ++i) {
		mm_match_t *q = &m[i];
		const uint64_t *r = q->cr;
		uint32_t k;
		for (k = 0; k < q->n; ++k) {
			int32_t is_self, rpos = (uint32_t)r[k] >> 1;
			mm128_t *p;
			if (skip_seed(opt->flag, r[k], q, qname, qlen, mi, &is_self)) continue;
			p = &a[(*n_a)++];
			if ((r[k]&1) == (q->q_pos&1)) { // forward strand
				p->x = (r[k]&0xffffffff00000000ULL) | rpos;
				p->y = (uint64_t)q->q_span << 32 | q->q_pos >> 1;
			} else { // reverse strand
				p->x = 1ULL<<63 | (r[k]&0xffffffff00000000ULL) | rpos;
				p->y = (uint64_t)q->q_span << 32 | (qlen - ((q->q_pos>>1) + 1 - q->q_span) - 1);
			}
			p->y |= (uint64_t)q->seg_id << MM_SEED_SEG_SHIFT;
			if (q->is_tandem) p->y |= MM_SEED_TANDEM;
			if (is_self) p->y |= MM_SEED_SELF;
		}
	}
	kfree(km, m);
	radix_sort_128x(a, a + (*n_a));
	return a;
}

// Post-chaining selection: pick primary chains, select sub-alignments, and
// (for long non-SR reads) attempt long joins. No-op when all chains are kept.
static void chain_post(const mm_mapopt_t *opt, int max_chain_gap_ref, const mm_idx_t *mi, void *km, int qlen, int n_segs, const int *qlens, int *n_regs, mm_reg1_t *regs, mm128_t *a)
{
	if (!(opt->flag & MM_F_ALL_CHAINS)) { // don't choose primary mapping(s)
		mm_set_parent(km, opt->mask_level, opt->mask_len, *n_regs, regs, opt->a * 2 + opt->b, opt->flag&MM_F_HARD_MLEVEL, opt->alt_drop);
		if (n_segs <= 1) mm_select_sub(km, opt->pri_ratio, mi->k*2, opt->best_n, n_regs, regs);
		else mm_select_sub_multi(km, opt->pri_ratio, 0.2f, 0.7f, max_chain_gap_ref, mi->k*2, opt->best_n, n_segs, qlens, n_regs, regs);
		if (!(opt->flag & (MM_F_SPLICE|MM_F_SR|MM_F_NO_LJOIN))) // long join not working well without primary chains
			mm_join_long(km, opt, qlen, n_regs, regs, a);
	}
}

// Base-level alignment of chained regions (when CIGAR is requested), followed
// by re-selection of primaries since alignment can change scores/filters.
static mm_reg1_t *align_regs(const mm_mapopt_t *opt, const mm_idx_t *mi, void *km, int qlen, const char *seq, int *n_regs, mm_reg1_t *regs, mm128_t *a)
{
	if (!(opt->flag & MM_F_CIGAR)) return regs;
	regs = mm_align_skeleton(km, opt, mi, qlen, seq, n_regs, regs, a); // this calls mm_filter_regs()
	if (!(opt->flag & MM_F_ALL_CHAINS)) { // don't choose primary mapping(s)
		mm_set_parent(km, opt->mask_level, opt->mask_len, *n_regs, regs, opt->a * 2 + opt->b, opt->flag&MM_F_HARD_MLEVEL, opt->alt_drop);
		mm_select_sub(km, opt->pri_ratio, mi->k*2, opt->best_n, n_regs, regs);
		mm_set_sam_pri(*n_regs, regs);
	}
	return regs;
}

// Map one fragment (continues beyond this point; only single-segment input
// is supported here, see the assert).
void mm_map_frag(const mm_idx_t *mi, int n_segs, const int *qlens, const char **seqs, int *n_regs, mm_reg1_t **regs, mm_tbuf_t *b, const mm_mapopt_t *opt, const char *qname)
{
	int i, j, rep_len, qlen_sum, n_regs0, n_mini_pos;
	int max_chain_gap_qry, max_chain_gap_ref, min_chain_gap_ref, is_splice = !!(opt->flag & MM_F_SPLICE), is_sr = !!(opt->flag & MM_F_SR);
	uint32_t hash;
	int64_t n_a;
	uint64_t *u, *mini_pos;
	mm128_t *a;
	mm128_v mv = {0,0,0};
	mm_reg1_t *regs0;
	km_stat_t kmst;
	//TODO: generalize this to n_segs > 1
	assert (n_segs == 1);
//deal with long reads (or asm contigs) only mm128_t **collect_a; int64_t *collect_n_a; mm_tbuf_t *b_master = b; //buffer for main thread entering this function //stage1: Pre-compute confident read alignments of substrings of input read //define new set of options for first stage //generate many candidate alignments to improve mapq estimation mm_mapopt_t opt2 = *opt; mm_mapopt_t *opt_2 = &opt2; opt_2->best_n = std::max(5, opt_2->best_n); //set minimum int countStartingPositions = 1 + std::ceil(qlens[0] * 1.0 / opt_2->suffixSampleOffset); collect_a = (mm128_t**)kmalloc(b->km, countStartingPositions * sizeof(mm128_t*)); collect_n_a = (int64_t *)kmalloc(b->km, countStartingPositions * sizeof(int64_t)); memset(collect_n_a, 0, countStartingPositions * sizeof(int64_t)); //create a boolean vector to indicate what portion of read were mapped using MCASs int8_t* seqMapped = (int8_t *)kmalloc(b->km, qlens[0] * sizeof(int8_t)); memset(seqMapped, 0, qlens[0] * sizeof(int8_t)); //check if SVaware mode enabled and query length is sufficient if (opt_2->SVaware && qlens[0] >= opt_2->SVawareMinReadLength) { //parallelize single read alignment further for better load balance #pragma omp parallel num_threads(OMP_PER_READ_THREADS) { //make all these variables private to openmp thread by redefining them int i, j, rep_len, qlen_sum, n_regs0, n_mini_pos; uint32_t hash; int64_t n_a; uint64_t *u, *mini_pos; mm128_t *a; mm128_v mv = {0,0,0}; mm_reg1_t *regs0; mm_tbuf_t *b = (mm_tbuf_t*)calloc(1, sizeof(mm_tbuf_t)); //omp thread local buffer b->km = km_init(); int* sub_qlens = (int *)kmalloc(b->km, 1 * sizeof(int)); char **sub_seqs = (char **) kmalloc(b->km, 1 * sizeof(char*)); sub_seqs[0] = (char *)kmalloc(b->km, qlens[0] * sizeof(char)); #pragma omp for schedule(dynamic) for (int sub_begin = 0; sub_begin < qlens[0] + opt_2->suffixSampleOffset - 1; sub_begin += opt_2->suffixSampleOffset) { int suffix_id = sub_begin / opt_2->suffixSampleOffset; //id for this string end-point bool 
mappingFound = false; int max_mapq_currentPos = 0; if (sub_begin >= qlens[0]) sub_begin = qlens[0]-1; //for last iter assert (sub_begin >= 0 && sub_begin < qlens[0]); for (int sub_len = opt_2->minPrefixLength; sub_len <= opt_2->maxPrefixLength; sub_len *= opt_2->prefixIncrementFactor) { //consider 'sub_len' bases to the right if (sub_begin + sub_len <= qlens[0]) //check substring end boundary limit { mv = {0,0,0}; sub_qlens[0] = sub_len; memcpy (sub_seqs[0], &(seqs[0][sub_begin]), sub_len); for (i = 0, qlen_sum = 0; i < n_segs; ++i) qlen_sum += sub_qlens[i], n_regs[i] = 0, regs[i] = 0, n_regs0 = 0; if (qlen_sum == 0 || n_segs <= 0 || n_segs > MM_MAX_SEG) break; if (opt_2->max_qlen > 0 && qlen_sum > opt_2->max_qlen) break; hash = qname? __ac_X31_hash_string(qname) : 0; hash ^= __ac_Wang_hash(qlen_sum) + __ac_Wang_hash(opt_2->seed); hash = __ac_Wang_hash(hash); collect_minimizers(b->km, opt_2, mi, n_segs, sub_qlens, sub_seqs, &mv); if (opt_2->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_2, opt_2->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_2, opt_2->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); if (mm_dbg_flag & MM_DBG_PRINT_SEED) { fprintf(stderr, "RS\t%d\n", rep_len); for (i = 0; i < n_a; ++i) fprintf(stderr, "SD\t%s\t%d\t%c\t%d\t%d\t%d\n", mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == 0? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); } // set max chaining gap on the query and the reference sequence if (is_sr) max_chain_gap_qry = qlen_sum > opt_2->max_gap? 
qlen_sum : opt_2->max_gap; else max_chain_gap_qry = opt_2->max_gap; if (opt_2->max_gap_ref > 0) { max_chain_gap_ref = opt_2->max_gap_ref; // always honor mm_mapopt_2_t::max_gap_ref if set } else if (opt_2->max_frag_len > 0) { max_chain_gap_ref = opt_2->max_frag_len - qlen_sum; if (max_chain_gap_ref < opt_2->max_gap) max_chain_gap_ref = opt_2->max_gap; } else max_chain_gap_ref = opt_2->max_gap; if (opt_2->min_gap_ref < max_chain_gap_ref) min_chain_gap_ref = opt_2->min_gap_ref; else min_chain_gap_ref = max_chain_gap_ref; a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_2->bw, opt_2->max_chain_skip, opt_2->max_chain_iter, opt_2->min_cnt, opt_2->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km); if (opt_2->max_occ > opt_2->mid_occ && rep_len > 0) { int rechain = 0; if (n_regs0 > 0) { // test if the best chain has all the segments int n_chained_segs = 1, max = 0, max_i = -1, max_off = -1, off = 0; for (i = 0; i < n_regs0; ++i) { // find the best chain if (max < (int)(u[i]>>32)) max = u[i]>>32, max_i = i, max_off = off; off += (uint32_t)u[i]; } for (i = 1; i < (int32_t)u[max_i]; ++i) // count the number of segments in the best chain if ((a[max_off+i].y&MM_SEED_SEG_MASK) != (a[max_off+i-1].y&MM_SEED_SEG_MASK)) ++n_chained_segs; if (n_chained_segs < n_segs) rechain = 1; } else rechain = 1; if (rechain) { // redo chaining with a higher max_occ threshold kfree(b->km, a); kfree(b->km, u); kfree(b->km, mini_pos); if (opt_2->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_2, opt_2->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_2, opt_2->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_2->bw, opt_2->max_chain_skip, opt_2->max_chain_iter, opt_2->min_cnt, opt_2->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, 
&n_regs0, &u, b->km); } } b->frag_gap = max_chain_gap_ref; b->rep_len = rep_len; regs0 = mm_gen_regs(b->km, hash, qlen_sum, n_regs0, u, a); if (mm_dbg_flag & MM_DBG_PRINT_SEED) for (j = 0; j < n_regs0; ++j) for (i = regs0[j].as; i < regs0[j].as + regs0[j].cnt; ++i) fprintf(stderr, "CN\t%d\t%s\t%d\t%c\t%d\t%d\t%d\n", j, mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == regs0[j].as? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); chain_post(opt_2, max_chain_gap_ref, mi, b->km, qlen_sum, n_segs, qlens, &n_regs0, regs0, a); if (!is_sr) mm_est_err(mi, qlen_sum, n_regs0, regs0, a, n_mini_pos, mini_pos); if (n_segs == 1) { // uni-segment regs0 = align_regs(opt_2, mi, b->km, sub_qlens[0], sub_seqs[0], &n_regs0, regs0, a); mm_set_mapq(b->km, n_regs0, regs0, opt_2->min_chain_score, opt_2->a, rep_len, is_sr); n_regs[0] = n_regs0, regs[0] = regs0; } else { // multi-segment mm_seg_t *seg; seg = mm_seg_gen(b->km, hash, n_segs, qlens, n_regs0, regs0, n_regs, regs, a); // split fragment chain to separate segment chains free(regs0); for (i = 0; i < n_segs; ++i) { mm_set_parent(b->km, opt_2->mask_level, opt_2->mask_len, n_regs[i], regs[i], opt_2->a * 2 + opt_2->b, opt_2->flag&MM_F_HARD_MLEVEL, opt_2->alt_drop); // update mm_reg1_t::parent regs[i] = align_regs(opt_2, mi, b->km, qlens[i], seqs[i], &n_regs[i], regs[i], seg[i].a); mm_set_mapq(b->km, n_regs[i], regs[i], opt_2->min_chain_score, opt_2->a, rep_len, is_sr); } mm_seg_free(b->km, n_segs, seg); if (n_segs == 2 && opt_2->pe_ori >= 0 && (opt_2->flag&MM_F_CIGAR)) mm_pair(b->km, max_chain_gap_ref, opt_2->pe_bonus, opt_2->a * 2 + opt_2->b, opt_2->a, qlens, n_regs, regs); // pairing } int mostPromisingMapping = -1; int max_mapq_fragment = 0; //For valid mapping, save anchors for (j = 0; j < n_regs0; ++j) { max_mapq_fragment = std::max ((int32_t)regs0[j].mapq, max_mapq_fragment); max_mapq_currentPos = std::max (max_mapq_fragment, 
max_mapq_currentPos); //Check for high confidence (mapq), length if (regs0[j].mapq >= opt_2->min_mapq && regs0[j].blen >= opt_2->min_qcov * sub_len && regs0[j].cnt > 0) { mappingFound = true; mostPromisingMapping = j; collect_n_a[suffix_id] = regs0[j].cnt; if (mm_dbg_flag & MM_DBG_POLISH) { //print MCAS information in paf-like format, helpful for debugging & dot-plotting MCAS alignments fprintf(stderr, "PO\t%s %d %d %d %c %s %d %d %d %d %d %d %d [FOUND] \n", qname, qlens[0], sub_begin + regs0[j].qs, sub_begin + regs0[j].qe, "+-"[regs0[j].rev] , mi->seq[regs0[j].rid].name, mi->seq[regs0[j].rid].len, regs0[j].rs, regs0[j].re, regs0[j].mapq, suffix_id, sub_begin, sub_len); } break; } } if ((mm_dbg_flag & MM_DBG_POLISH) && !mappingFound) fprintf(stderr, "PO\tqname:%s, suffid:%d, begin:%d, len:%d, max_mapq:%d, n_regs0:%d [NONE FOUND] \n", qname, suffix_id, sub_begin, sub_len, max_mapq_fragment, n_regs0); if (mappingFound) { assert (collect_n_a[suffix_id] > 0); assert (mostPromisingMapping >= 0); #pragma omp critical { collect_a[suffix_id] = (mm128_t*)kmalloc(b_master->km, collect_n_a[suffix_id] * sizeof(mm128_t)); } j = mostPromisingMapping; for (i = 0; i < regs0[j].cnt; ++i) { mm128_t _a_ = a[i + regs0[j].as]; //correct coordinates of each anchor while storing if (_a_.x >> 63) //reverse strand _a_.y += (qlens[0] - sub_begin - sub_len); else _a_.y += sub_begin; collect_a[suffix_id][i] = _a_; } //mark mapped interval in boolean vector #pragma omp critical { for(i = sub_begin; i < sub_begin + sub_len; i++) seqMapped[i] = 1; } } for (j = 0; j < n_regs0; ++j) {free (regs0[j].p);} free (regs0); kfree(b->km, mv.a); kfree(b->km, a); kfree(b->km, u); kfree(b->km, mini_pos); if (mappingFound || !n_regs0) break; // mappingFound-> found shortest prefix; !n_regs0-> no candidate } //consider 'sub_len' bases to the left if (sub_begin - sub_len + 1 >= 0) //check substring start boundary limit { mv = {0,0,0}; sub_qlens[0] = sub_len; memcpy (sub_seqs[0], &(seqs[0][sub_begin - sub_len 
+1]), sub_len); for (i = 0, qlen_sum = 0; i < n_segs; ++i) qlen_sum += sub_qlens[i], n_regs[i] = 0, regs[i] = 0, n_regs0 = 0; if (qlen_sum == 0 || n_segs <= 0 || n_segs > MM_MAX_SEG) break; if (opt_2->max_qlen > 0 && qlen_sum > opt_2->max_qlen) break; hash = qname? __ac_X31_hash_string(qname) : 0; hash ^= __ac_Wang_hash(qlen_sum) + __ac_Wang_hash(opt_2->seed); hash = __ac_Wang_hash(hash); collect_minimizers(b->km, opt_2, mi, n_segs, sub_qlens, sub_seqs, &mv); if (opt_2->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_2, opt_2->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_2, opt_2->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); if (mm_dbg_flag & MM_DBG_PRINT_SEED) { fprintf(stderr, "RS\t%d\n", rep_len); for (i = 0; i < n_a; ++i) fprintf(stderr, "SD\t%s\t%d\t%c\t%d\t%d\t%d\n", mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == 0? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); } // set max chaining gap on the query and the reference sequence if (is_sr) max_chain_gap_qry = qlen_sum > opt_2->max_gap? 
qlen_sum : opt_2->max_gap; else max_chain_gap_qry = opt_2->max_gap; if (opt_2->max_gap_ref > 0) { max_chain_gap_ref = opt_2->max_gap_ref; // always honor mm_mapopt_2_t::max_gap_ref if set } else if (opt_2->max_frag_len > 0) { max_chain_gap_ref = opt_2->max_frag_len - qlen_sum; if (max_chain_gap_ref < opt_2->max_gap) max_chain_gap_ref = opt_2->max_gap; } else max_chain_gap_ref = opt_2->max_gap; if (opt_2->min_gap_ref < max_chain_gap_ref) min_chain_gap_ref = opt_2->min_gap_ref; else min_chain_gap_ref = max_chain_gap_ref; a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_2->bw, opt_2->max_chain_skip, opt_2->max_chain_iter, opt_2->min_cnt, opt_2->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km); if (opt_2->max_occ > opt_2->mid_occ && rep_len > 0) { int rechain = 0; if (n_regs0 > 0) { // test if the best chain has all the segments int n_chained_segs = 1, max = 0, max_i = -1, max_off = -1, off = 0; for (i = 0; i < n_regs0; ++i) { // find the best chain if (max < (int)(u[i]>>32)) max = u[i]>>32, max_i = i, max_off = off; off += (uint32_t)u[i]; } for (i = 1; i < (int32_t)u[max_i]; ++i) // count the number of segments in the best chain if ((a[max_off+i].y&MM_SEED_SEG_MASK) != (a[max_off+i-1].y&MM_SEED_SEG_MASK)) ++n_chained_segs; if (n_chained_segs < n_segs) rechain = 1; } else rechain = 1; if (rechain) { // redo chaining with a higher max_occ threshold kfree(b->km, a); kfree(b->km, u); kfree(b->km, mini_pos); if (opt_2->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_2, opt_2->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_2, opt_2->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_2->bw, opt_2->max_chain_skip, opt_2->max_chain_iter, opt_2->min_cnt, opt_2->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, 
&n_regs0, &u, b->km); } } b->frag_gap = max_chain_gap_ref; b->rep_len = rep_len; regs0 = mm_gen_regs(b->km, hash, qlen_sum, n_regs0, u, a); if (mm_dbg_flag & MM_DBG_PRINT_SEED) for (j = 0; j < n_regs0; ++j) for (i = regs0[j].as; i < regs0[j].as + regs0[j].cnt; ++i) fprintf(stderr, "CN\t%d\t%s\t%d\t%c\t%d\t%d\t%d\n", j, mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == regs0[j].as? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); chain_post(opt_2, max_chain_gap_ref, mi, b->km, qlen_sum, n_segs, qlens, &n_regs0, regs0, a); if (!is_sr) mm_est_err(mi, qlen_sum, n_regs0, regs0, a, n_mini_pos, mini_pos); if (n_segs == 1) { // uni-segment regs0 = align_regs(opt_2, mi, b->km, sub_qlens[0], sub_seqs[0], &n_regs0, regs0, a); mm_set_mapq(b->km, n_regs0, regs0, opt_2->min_chain_score, opt_2->a, rep_len, is_sr); n_regs[0] = n_regs0, regs[0] = regs0; } else { // multi-segment mm_seg_t *seg; seg = mm_seg_gen(b->km, hash, n_segs, qlens, n_regs0, regs0, n_regs, regs, a); // split fragment chain to separate segment chains free(regs0); for (i = 0; i < n_segs; ++i) { mm_set_parent(b->km, opt->mask_level, opt->mask_len, n_regs[i], regs[i], opt->a * 2 + opt->b, opt->flag&MM_F_HARD_MLEVEL, opt->alt_drop); // update mm_reg1_t::parent regs[i] = align_regs(opt_2, mi, b->km, qlens[i], seqs[i], &n_regs[i], regs[i], seg[i].a); mm_set_mapq(b->km, n_regs[i], regs[i], opt_2->min_chain_score, opt_2->a, rep_len, is_sr); } mm_seg_free(b->km, n_segs, seg); if (n_segs == 2 && opt_2->pe_ori >= 0 && (opt_2->flag&MM_F_CIGAR)) mm_pair(b->km, max_chain_gap_ref, opt_2->pe_bonus, opt_2->a * 2 + opt_2->b, opt_2->a, qlens, n_regs, regs); // pairing } int mostPromisingMapping = -1; int max_mapq_fragment = 0; //For valid mapping, save anchors for (j = 0; j < n_regs0; ++j) { max_mapq_fragment = std::max ((int32_t)regs0[j].mapq, max_mapq_fragment); max_mapq_currentPos = std::max (max_mapq_fragment, 
max_mapq_currentPos); //Check for high confidence (mapq), length if (regs0[j].mapq >= opt_2->min_mapq && regs0[j].blen >= opt_2->min_qcov * sub_len && regs0[j].cnt > 0) { mappingFound = true; mostPromisingMapping = j; collect_n_a[suffix_id] = regs0[j].cnt; if (mm_dbg_flag & MM_DBG_POLISH) { //print MCAS information in paf-like format, helpful for debugging & dot-plotting MCAS alignments fprintf(stderr, "PO\t%s %d %d %d %c %s %d %d %d %d %d %d %d [FOUND] \n", qname, qlens[0], sub_begin - sub_len + regs0[j].qs, sub_begin - sub_len + regs0[j].qe, "+-"[regs0[j].rev] , mi->seq[regs0[j].rid].name, mi->seq[regs0[j].rid].len, regs0[j].rs, regs0[j].re, regs0[j].mapq, suffix_id, sub_begin, -1 * sub_len); } break; } } if ((mm_dbg_flag & MM_DBG_POLISH) && !mappingFound) fprintf(stderr, "PO\tqname:%s, suffid:%d, begin:%d, len:%d, max_mapq:%d, n_regs0:%d [NONE FOUND] \n", qname, suffix_id, sub_begin, -1 * sub_len, max_mapq_fragment, n_regs0); if (mappingFound) { assert (collect_n_a[suffix_id] > 0); assert (mostPromisingMapping >= 0); #pragma omp critical { collect_a[suffix_id] = (mm128_t*)kmalloc(b_master->km, collect_n_a[suffix_id] * sizeof(mm128_t)); } j = mostPromisingMapping; for (i = 0; i < regs0[j].cnt; ++i) { mm128_t _a_ = a[i + regs0[j].as]; //correct coordinates of each anchor while storing if (_a_.x >> 63) //reverse strand _a_.y += (qlens[0]-1) - sub_begin; else _a_.y += sub_begin - sub_len + 1; //offset of first base of substring collect_a[suffix_id][i] = _a_; } //mark mapped interval in boolean vector #pragma omp critical { for(i = sub_begin - sub_len +1; i <= sub_begin; i++) seqMapped[i] = 1; } } for (j = 0; j < n_regs0; ++j) {free (regs0[j].p);} free (regs0); kfree(b->km, mv.a); kfree(b->km, a); kfree(b->km, u); kfree(b->km, mini_pos); if (mappingFound || !n_regs0) break; // mappingFound-> found shortest prefix; !n_regs0-> no candidate } } if ((mm_dbg_flag & MM_DBG_POLISH) && !mappingFound) fprintf(stderr, "PO\tqname:%s, begin:%d, max_mapq_currentPos:%d [NONE 
FOUND] \n", qname, sub_begin, max_mapq_currentPos); } //free openmp thread specific memory kfree(b->km, sub_qlens); kfree(b->km, sub_seqs[0]); kfree(b->km, sub_seqs); mm_tbuf_destroy(b); } } if (mm_dbg_flag & MM_DBG_POLISH) { int mappedcnt = 0; for (i = 0; i < qlens[0]; i++) if (seqMapped[i]) mappedcnt++; fprintf(stderr, "PO\tqname:%s, count of mapped query bases = %d among %d\n", qname, mappedcnt, qlens[0]); } //define new set of options for next stage //we can make stage 2 as sensitive as possible with very few seeds remaining mm_mapopt_t opt3 = *opt; mm_mapopt_t *opt_3 = &opt3; opt_3->zdrop_inv = std::min (opt->zdrop_inv, opt->stage2_zdrop_inv); opt_3->bw= std::max(opt->bw, opt->stage2_bw); //increased gap helps compensate for sometimes missing seeds along correct alignments opt_3->max_gap = std::max(opt->max_gap, opt->stage2_max_gap); //Re-run mapping with the above selected anchors { for (i = 0, qlen_sum = 0; i < n_segs; ++i) qlen_sum += qlens[i], n_regs[i] = 0, regs[i] = 0, n_regs0 = 0; if (qlen_sum == 0 || n_segs <= 0 || n_segs > MM_MAX_SEG) return; if (opt_3->max_qlen > 0 && qlen_sum > opt_3->max_qlen) return; hash = qname? 
__ac_X31_hash_string(qname) : 0; hash ^= __ac_Wang_hash(qlen_sum) + __ac_Wang_hash(opt_3->seed); hash = __ac_Wang_hash(hash); //Use anchors from our own analysis n_a = 0; for (i = 0; i < countStartingPositions; i++) n_a += collect_n_a[i]; if ((mm_dbg_flag & MM_DBG_POLISH) && opt->SVaware) fprintf(stderr, "PO\tqname:%s, n_a (before filtering and checking for duplicates) :%" PRId64 "\n", qname, n_a); if (n_a) { //allocate sufficient memory a = (mm128_t*)kmalloc(b->km, n_a * sizeof(mm128_t)); //set values of anchors int64_t n_a_counter = 0; for (i = 0; i < countStartingPositions; i++) for (j=0; j<collect_n_a[i]; j++) a[n_a_counter++] = collect_a[i][j]; //discard duplicate entries int64_t n_a_unique = 0; std::sort(a, a + n_a, [](const mm128_t &a, const mm128_t &b){return std::tie(a.x, a.y) < std::tie(b.x, b.y);}); //traverse through the array elements for (i = 0; i < n_a;) { j = i; while (j < n_a && std::tie(a[i].x, a[i].y) == std::tie(a[j].x, a[j].y)) j++; //j will increment at least by one here a[n_a_unique++] = a[i]; i = j; } n_a = n_a_unique; if (mm_dbg_flag & MM_DBG_POLISH) fprintf(stderr, "PO\tqname:%s, n_a (after filtering and checking for duplicates) :%" PRId64 ", min_cnt:%d\n", qname, n_a, opt_3->min_cnt); //sort anchors by reference position before moving on radix_sort_128x(a, a + n_a); if (n_a < opt_3->min_cnt) //insufficient no. 
of seeds { n_a = 0; //reset to 0 kfree(b->km, a); } } } //collect additional anchors from unmapped intervals { //if we have found MCAS-based anchors, but with a few unmapped read intervals int unmappedcnt = 0; for (i = 0; i < qlens[0]; i++) if (seqMapped[i]==0) unmappedcnt++; if (n_a > 0 && unmappedcnt > 0) { char **unmapped_seqs = (char **) kmalloc(b->km, 1 * sizeof(char*)); unmapped_seqs[0] = (char *)kmalloc(b->km, qlens[0] * sizeof(char)); for (i = 0; i < qlens[0]; i++) { if (seqMapped[i] > 0) unmapped_seqs[0][i] = 'N'; else unmapped_seqs[0][i] = seqs[0][i]; } if (mm_dbg_flag & MM_DBG_POLISH) fprintf(stderr, "PO\tqname:%s, n_a (before mapping the unmapped read substrings) :%" PRId64 "\n", qname, n_a); mm128_t *a_remaining; int64_t n_a_remaining; mv = {0,0,0}; collect_minimizers(b->km, opt_3, mi, n_segs, qlens, unmapped_seqs, &mv); if (opt_3->flag & MM_F_HEAP_SORT) a_remaining = collect_seed_hits_heap(b->km, opt_3, opt_3->mid_occ, mi, qname, &mv, qlen_sum, &n_a_remaining, &rep_len, &n_mini_pos, &mini_pos); else a_remaining = collect_seed_hits(b->km, opt_3, opt_3->mid_occ, mi, qname, &mv, qlen_sum, &n_a_remaining, &rep_len, &n_mini_pos, &mini_pos); kfree(b->km, mv.a); kfree(b->km, mini_pos); int64_t n_a_whole = n_a_remaining + n_a; mm128_t *a_whole = (mm128_t*)kmalloc(b->km, n_a_whole * sizeof(mm128_t)); for (i=0; i<n_a; i++) { #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" a_whole[i] = a[i]; } for (i=n_a; i<n_a_whole; i++) { a_whole[i] = a_remaining[i-n_a]; } //sort anchors by reference position before moving on radix_sort_128x(a_whole, a_whole + n_a_whole); kfree(b->km, a); kfree(b->km, a_remaining); a = a_whole; n_a = n_a_whole; kfree(b->km, unmapped_seqs[0]); kfree(b->km, unmapped_seqs); if (mm_dbg_flag & MM_DBG_POLISH) fprintf(stderr, "PO\tqname:%s, n_a (after mapping the unmapped read substrings) :%" PRId64 "\n", qname, n_a); } } { if (!n_a) //MCAS-method couldn't be used { //go with the default route if ((mm_dbg_flag & MM_DBG_POLISH) && 
opt->SVaware) fprintf(stderr, "PO\tfalling back to default mapping algorithm for read: %s\n", qname); //revert to original parameters *opt_3 = *opt; mv = {0,0,0}; collect_minimizers(b->km, opt_3, mi, n_segs, qlens, seqs, &mv); if (opt_3->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_3, opt_3->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_3, opt_3->mid_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); kfree(b->km, mv.a); kfree(b->km, mini_pos); } if (mm_dbg_flag & MM_DBG_PRINT_SEED) { fprintf(stderr, "RS\t%d\n", rep_len); for (i = 0; i < n_a; ++i) fprintf(stderr, "SD\t%s\t%d\t%c\t%d\t%d\t%d\n", mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == 0? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); } // set max chaining gap on the query and the reference sequence if (is_sr) max_chain_gap_qry = qlen_sum > opt_3->max_gap? 
qlen_sum : opt_3->max_gap; else max_chain_gap_qry = opt_3->max_gap; if (opt_3->max_gap_ref > 0) { max_chain_gap_ref = opt_3->max_gap_ref; // always honor mm_mapopt_3_t::max_gap_ref if set } else if (opt_3->max_frag_len > 0) { max_chain_gap_ref = opt_3->max_frag_len - qlen_sum; if (max_chain_gap_ref < opt_3->max_gap) max_chain_gap_ref = opt_3->max_gap; } else max_chain_gap_ref = opt_3->max_gap; if (opt_3->min_gap_ref < max_chain_gap_ref) min_chain_gap_ref = opt_3->min_gap_ref; else min_chain_gap_ref = max_chain_gap_ref; a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_3->bw, opt_3->max_chain_skip, opt_3->max_chain_iter, opt_3->min_cnt, opt_3->min_chain_score, opt->chain_gap_scale, is_splice, n_segs, n_a, a, &n_regs0, &u, b->km); if (opt_3->max_occ > opt_3->mid_occ && rep_len > 0) { int rechain = 0; if (n_regs0 > 0) { // test if the best chain has all the segments int n_chained_segs = 1, max = 0, max_i = -1, max_off = -1, off = 0; for (i = 0; i < n_regs0; ++i) { // find the best chain if (max < (int)(u[i]>>32)) max = u[i]>>32, max_i = i, max_off = off; off += (uint32_t)u[i]; } for (i = 1; i < (int32_t)u[max_i]; ++i) // count the number of segments in the best chain if ((a[max_off+i].y&MM_SEED_SEG_MASK) != (a[max_off+i-1].y&MM_SEED_SEG_MASK)) ++n_chained_segs; if (n_chained_segs < n_segs) rechain = 1; } else rechain = 1; if (rechain) { // redo chaining with a higher max_occ threshold kfree(b->km, a); kfree(b->km, u); //kfree(b->km, mini_pos); //already freed above if (opt_3->flag & MM_F_HEAP_SORT) a = collect_seed_hits_heap(b->km, opt_3, opt_3->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); else a = collect_seed_hits(b->km, opt_3, opt_3->max_occ, mi, qname, &mv, qlen_sum, &n_a, &rep_len, &n_mini_pos, &mini_pos); a = mm_chain_dp(max_chain_gap_ref, min_chain_gap_ref, max_chain_gap_qry, opt_3->bw, opt_3->max_chain_skip, opt_3->max_chain_iter, opt_3->min_cnt, opt_3->min_chain_score, opt->chain_gap_scale, is_splice, 
n_segs, n_a, a, &n_regs0, &u, b->km); } } b->frag_gap = max_chain_gap_ref; b->rep_len = rep_len; regs0 = mm_gen_regs(b->km, hash, qlen_sum, n_regs0, u, a); if (mm_dbg_flag & MM_DBG_PRINT_SEED) for (j = 0; j < n_regs0; ++j) for (i = regs0[j].as; i < regs0[j].as + regs0[j].cnt; ++i) fprintf(stderr, "CN\t%d\t%s\t%d\t%c\t%d\t%d\t%d\n", j, mi->seq[a[i].x<<1>>33].name, (int32_t)a[i].x, "+-"[a[i].x>>63], (int32_t)a[i].y, (int32_t)(a[i].y>>32&0xff), i == regs0[j].as? 0 : ((int32_t)a[i].y - (int32_t)a[i-1].y) - ((int32_t)a[i].x - (int32_t)a[i-1].x)); chain_post(opt_3, max_chain_gap_ref, mi, b->km, qlen_sum, n_segs, qlens, &n_regs0, regs0, a); //This function generates lot of warnings /*if (!is_sr) mm_est_err(mi, qlen_sum, n_regs0, regs0, a, n_mini_pos, mini_pos);*/ if (n_segs == 1) { // uni-segment regs0 = align_regs(opt_3, mi, b->km, qlens[0], seqs[0], &n_regs0, regs0, a); mm_set_mapq(b->km, n_regs0, regs0, opt_3->min_chain_score, opt_3->a, rep_len, is_sr); n_regs[0] = n_regs0, regs[0] = regs0; //TODO: find a better way to compute mapping quality } else { // multi-segment mm_seg_t *seg; seg = mm_seg_gen(b->km, hash, n_segs, qlens, n_regs0, regs0, n_regs, regs, a); // split fragment chain to separate segment chains free(regs0); for (i = 0; i < n_segs; ++i) { mm_set_parent(b->km, opt_3->mask_level, opt_3->mask_len, n_regs[i], regs[i], opt_3->a * 2 + opt_3->b, opt_3->flag&MM_F_HARD_MLEVEL, opt->alt_drop); // update mm_reg1_t::parent regs[i] = align_regs(opt_3, mi, b->km, qlens[i], seqs[i], &n_regs[i], regs[i], seg[i].a); mm_set_mapq(b->km, n_regs[i], regs[i], opt_3->min_chain_score, opt_3->a, rep_len, is_sr); } mm_seg_free(b->km, n_segs, seg); if (n_segs == 2 && opt_3->pe_ori >= 0 && (opt_3->flag&MM_F_CIGAR)) mm_pair(b->km, max_chain_gap_ref, opt_3->pe_bonus, opt_3->a * 2 + opt_3->b, opt_3->a, qlens, n_regs, regs); // pairing } kfree(b->km, a); kfree(b->km, u); /*kfree(b->km, mini_pos);*/ /*kfree(b->km, mv.a);*/ } for (i = 0; i < countStartingPositions; i++) if 
(collect_n_a[i] > 0) kfree(b->km, collect_a[i]); kfree(b->km, collect_a); kfree(b->km, collect_n_a); kfree(b->km, seqMapped); if (b->km) { km_stat(b->km, &kmst); if (mm_dbg_flag & MM_DBG_PRINT_QNAME) fprintf(stderr, "QM\t%s\t%d\tcap=%ld,nCore=%ld,largest=%ld\n", qname, qlen_sum, kmst.capacity, kmst.n_cores, kmst.largest); assert(kmst.n_blocks == kmst.n_cores); // otherwise, there is a memory leak if (kmst.largest > 1U<<28) { km_destroy(b->km); b->km = km_init(); } } } mm_reg1_t *mm_map(const mm_idx_t *mi, int qlen, const char *seq, int *n_regs, mm_tbuf_t *b, const mm_mapopt_t *opt, const char *qname) { mm_reg1_t *regs; mm_map_frag(mi, 1, &qlen, &seq, n_regs, &regs, b, opt, qname); return regs; } /************************** * Multi-threaded mapping * **************************/ typedef struct { int mini_batch_size, n_processed, n_threads, n_fp; const mm_mapopt_t *opt; mm_bseq_file_t **fp; const mm_idx_t *mi; kstring_t str; int n_parts; uint32_t *rid_shift; FILE *fp_split, **fp_parts; } pipeline_t; typedef struct { const pipeline_t *p; int n_seq, n_frag; mm_bseq1_t *seq; int *n_reg, *seg_off, *n_seg, *rep_len, *frag_gap; mm_reg1_t **reg; mm_tbuf_t **buf; } step_t; static void worker_for(void *_data, long i, int tid) // kt_for() callback { step_t *s = (step_t*)_data; int qlens[MM_MAX_SEG], j, off = s->seg_off[i], pe_ori = s->p->opt->pe_ori; const char *qseqs[MM_MAX_SEG]; mm_tbuf_t *b = s->buf[tid]; assert(s->n_seg[i] <= MM_MAX_SEG); if (mm_dbg_flag & MM_DBG_PRINT_QNAME) fprintf(stderr, "QR\t%s\t%d\t%d\n", s->seq[off].name, tid, s->seq[off].l_seq); for (j = 0; j < s->n_seg[i]; ++j) { if (s->n_seg[i] == 2 && ((j == 0 && (pe_ori>>1&1)) || (j == 1 && (pe_ori&1)))) mm_revcomp_bseq(&s->seq[off + j]); qlens[j] = s->seq[off + j].l_seq; qseqs[j] = s->seq[off + j].seq; } if (s->p->opt->flag & MM_F_INDEPEND_SEG) { for (j = 0; j < s->n_seg[i]; ++j) { mm_map_frag(s->p->mi, 1, &qlens[j], &qseqs[j], &s->n_reg[off+j], &s->reg[off+j], b, s->p->opt, s->seq[off+j].name); s->rep_len[off 
+ j] = b->rep_len; s->frag_gap[off + j] = b->frag_gap; } } else { mm_map_frag(s->p->mi, s->n_seg[i], qlens, qseqs, &s->n_reg[off], &s->reg[off], b, s->p->opt, s->seq[off].name); for (j = 0; j < s->n_seg[i]; ++j) { s->rep_len[off + j] = b->rep_len; s->frag_gap[off + j] = b->frag_gap; } } for (j = 0; j < s->n_seg[i]; ++j) // flip the query strand and coordinate to the original read strand if (s->n_seg[i] == 2 && ((j == 0 && (pe_ori>>1&1)) || (j == 1 && (pe_ori&1)))) { int k, t; mm_revcomp_bseq(&s->seq[off + j]); for (k = 0; k < s->n_reg[off + j]; ++k) { mm_reg1_t *r = &s->reg[off + j][k]; t = r->qs; r->qs = qlens[j] - r->qe; r->qe = qlens[j] - t; r->rev = !r->rev; } } } static void merge_hits(step_t *s) { int f, i, k0, k, max_seg = 0, *n_reg_part, *rep_len_part, *frag_gap_part, *qlens; void *km; FILE **fp = s->p->fp_parts; const mm_mapopt_t *opt = s->p->opt; km = km_init(); for (f = 0; f < s->n_frag; ++f) max_seg = max_seg > s->n_seg[f]? max_seg : s->n_seg[f]; qlens = CALLOC(int, max_seg + s->p->n_parts * 3); n_reg_part = qlens + max_seg; rep_len_part = n_reg_part + s->p->n_parts; frag_gap_part = rep_len_part + s->p->n_parts; for (f = 0, k = k0 = 0; f < s->n_frag; ++f) { k0 = k; for (i = 0; i < s->n_seg[f]; ++i, ++k) { int j, l, t, rep_len = 0; qlens[i] = s->seq[k].l_seq; for (j = 0, s->n_reg[k] = 0; j < s->p->n_parts; ++j) { mm_err_fread(&n_reg_part[j], sizeof(int), 1, fp[j]); mm_err_fread(&rep_len_part[j], sizeof(int), 1, fp[j]); mm_err_fread(&frag_gap_part[j], sizeof(int), 1, fp[j]); s->n_reg[k] += n_reg_part[j]; if (rep_len < rep_len_part[j]) rep_len = rep_len_part[j]; } s->reg[k] = CALLOC(mm_reg1_t, s->n_reg[k]); for (j = 0, l = 0; j < s->p->n_parts; ++j) { for (t = 0; t < n_reg_part[j]; ++t, ++l) { mm_reg1_t *r = &s->reg[k][l]; uint32_t capacity; mm_err_fread(r, sizeof(mm_reg1_t), 1, fp[j]); r->rid += s->p->rid_shift[j]; if (opt->flag & MM_F_CIGAR) { mm_err_fread(&capacity, 4, 1, fp[j]); r->p = (mm_extra_t*)calloc(capacity, 4); r->p->capacity = capacity; 
mm_err_fread(r->p, r->p->capacity, 4, fp[j]); } } } mm_hit_sort(km, &s->n_reg[k], s->reg[k], opt->alt_drop); mm_set_parent(km, opt->mask_level, opt->mask_len, s->n_reg[k], s->reg[k], opt->a * 2 + opt->b, opt->flag&MM_F_HARD_MLEVEL, opt->alt_drop); if (!(opt->flag & MM_F_ALL_CHAINS)) { mm_select_sub(km, opt->pri_ratio, s->p->mi->k*2, opt->best_n, &s->n_reg[k], s->reg[k]); mm_set_sam_pri(s->n_reg[k], s->reg[k]); } mm_set_mapq(km, s->n_reg[k], s->reg[k], opt->min_chain_score, opt->a, rep_len, !!(opt->flag & MM_F_SR)); } if (s->n_seg[f] == 2 && opt->pe_ori >= 0 && (opt->flag&MM_F_CIGAR)) mm_pair(km, frag_gap_part[0], opt->pe_bonus, opt->a * 2 + opt->b, opt->a, qlens, &s->n_reg[k0], &s->reg[k0]); } free(qlens); km_destroy(km); } static void *worker_pipeline(void *shared, int step, void *in) { int i, j, k; pipeline_t *p = (pipeline_t*)shared; if (step == 0) { // step 0: read sequences int with_qual = (!!(p->opt->flag & MM_F_OUT_SAM) && !(p->opt->flag & MM_F_NO_QUAL)); int with_comment = !!(p->opt->flag & MM_F_COPY_COMMENT); int frag_mode = (p->n_fp > 1 || !!(p->opt->flag & MM_F_FRAG_MODE)); step_t *s; s = (step_t*)calloc(1, sizeof(step_t)); if (p->n_fp > 1) s->seq = mm_bseq_read_frag2(p->n_fp, p->fp, p->mini_batch_size, with_qual, with_comment, &s->n_seq); else s->seq = mm_bseq_read3(p->fp[0], p->mini_batch_size, with_qual, with_comment, frag_mode, &s->n_seq); if (s->seq) { s->p = p; for (i = 0; i < s->n_seq; ++i) s->seq[i].rid = p->n_processed++; //reshuffle based on length here, longer read first //NOTE: this would affect the ordering of reads in output { mm_bseq1_t *seq_copy = (mm_bseq1_t*) kmalloc(0, sizeof(mm_bseq1_t) * s->n_seq); std::vector< std::pair<int, int> > lengths; for (i = 0; i < s->n_seq; ++i) lengths.emplace_back (s->seq[i].l_seq, i); std::sort (lengths.begin(), lengths.end(), std::greater<std::pair<int,int>>()); for (i = 0; i < s->n_seq; ++i) { int prev_id = lengths[i].second; //copy all pointers seq_copy[i].l_seq = s->seq[prev_id].l_seq; 
seq_copy[i].rid = s->seq[prev_id].rid; seq_copy[i].name = s->seq[prev_id].name; seq_copy[i].seq = s->seq[prev_id].seq; seq_copy[i].qual = s->seq[prev_id].qual; seq_copy[i].comment = s->seq[prev_id].comment; } free(s->seq); s->seq = seq_copy; } s->buf = (mm_tbuf_t**)calloc(p->n_threads, sizeof(mm_tbuf_t*)); for (i = 0; i < p->n_threads; ++i) s->buf[i] = mm_tbuf_init(); s->n_reg = (int*)calloc(5 * s->n_seq, sizeof(int)); s->seg_off = s->n_reg + s->n_seq; // seg_off, n_seg, rep_len and frag_gap are allocated together with n_reg s->n_seg = s->seg_off + s->n_seq; s->rep_len = s->n_seg + s->n_seq; s->frag_gap = s->rep_len + s->n_seq; s->reg = (mm_reg1_t**)calloc(s->n_seq, sizeof(mm_reg1_t*)); for (i = 1, j = 0; i <= s->n_seq; ++i) if (i == s->n_seq || !frag_mode || !mm_qname_same(s->seq[i-1].name, s->seq[i].name)) { s->n_seg[s->n_frag] = i - j; s->seg_off[s->n_frag++] = j; j = i; } return s; } else free(s); } else if (step == 1) { // step 1: map if (p->n_parts > 0) merge_hits((step_t*)in); else kt_for(p->n_threads, worker_for, in, ((step_t*)in)->n_frag); return in; } else if (step == 2) { // step 2: output void *km = 0; step_t *s = (step_t*)in; const mm_idx_t *mi = p->mi; for (i = 0; i < p->n_threads; ++i) mm_tbuf_destroy(s->buf[i]); free(s->buf); if ((p->opt->flag & MM_F_OUT_CS) && !(mm_dbg_flag & MM_DBG_NO_KALLOC)) km = km_init(); for (k = 0; k < s->n_frag; ++k) { int seg_st = s->seg_off[k], seg_en = s->seg_off[k] + s->n_seg[k]; for (i = seg_st; i < seg_en; ++i) { mm_bseq1_t *t = &s->seq[i]; if (p->opt->split_prefix && p->n_parts == 0) { // then write to temporary files mm_err_fwrite(&s->n_reg[i], sizeof(int), 1, p->fp_split); mm_err_fwrite(&s->rep_len[i], sizeof(int), 1, p->fp_split); mm_err_fwrite(&s->frag_gap[i], sizeof(int), 1, p->fp_split); for (j = 0; j < s->n_reg[i]; ++j) { mm_reg1_t *r = &s->reg[i][j]; mm_err_fwrite(r, sizeof(mm_reg1_t), 1, p->fp_split); if (p->opt->flag & MM_F_CIGAR) { mm_err_fwrite(&r->p->capacity, 4, 1, p->fp_split); mm_err_fwrite(r->p, 
r->p->capacity, 4, p->fp_split); } } } else if (s->n_reg[i] > 0) { // the query has at least one hit for (j = 0; j < s->n_reg[i]; ++j) { mm_reg1_t *r = &s->reg[i][j]; assert(!r->sam_pri || r->id == r->parent); if ((p->opt->flag & MM_F_NO_PRINT_2ND) && r->id != r->parent) continue; if (p->opt->flag & MM_F_OUT_SAM) mm_write_sam3(&p->str, mi, t, i - seg_st, j, s->n_seg[k], &s->n_reg[seg_st], (const mm_reg1_t*const*)&s->reg[seg_st], km, p->opt->flag, s->rep_len[i]); else mm_write_paf3(&p->str, mi, t, r, km, p->opt->flag, s->rep_len[i]); mm_err_puts(p->str.s); } } else if ((p->opt->flag & MM_F_PAF_NO_HIT) || ((p->opt->flag & MM_F_OUT_SAM) && !(p->opt->flag & MM_F_SAM_HIT_ONLY))) { // output an empty hit, if requested if (p->opt->flag & MM_F_OUT_SAM) mm_write_sam3(&p->str, mi, t, i - seg_st, -1, s->n_seg[k], &s->n_reg[seg_st], (const mm_reg1_t*const*)&s->reg[seg_st], km, p->opt->flag, s->rep_len[i]); else mm_write_paf3(&p->str, mi, t, 0, 0, p->opt->flag, s->rep_len[i]); mm_err_puts(p->str.s); } } for (i = seg_st; i < seg_en; ++i) { for (j = 0; j < s->n_reg[i]; ++j) free(s->reg[i][j].p); free(s->reg[i]); free(s->seq[i].seq); free(s->seq[i].name); if (s->seq[i].qual) free(s->seq[i].qual); if (s->seq[i].comment) free(s->seq[i].comment); } } free(s->reg); free(s->n_reg); free(s->seq); // seg_off, n_seg, rep_len and frag_gap were allocated with reg; no memory leak here km_destroy(km); if (mm_verbose >= 3) fprintf(stderr, "[M::%s::%.3f*%.2f] mapped %d sequences\n", __func__, realtime() - mm_realtime0, cputime() / (realtime() - mm_realtime0), s->n_seq); free(s); } return 0; } static mm_bseq_file_t **open_bseqs(int n, const char **fn) { mm_bseq_file_t **fp; int i, j; fp = (mm_bseq_file_t**)calloc(n, sizeof(mm_bseq_file_t*)); for (i = 0; i < n; ++i) { if ((fp[i] = mm_bseq_open(fn[i])) == 0) { if (mm_verbose >= 1) fprintf(stderr, "ERROR: failed to open file '%s': %s\n", fn[i], strerror(errno)); for (j = 0; j < i; ++j) mm_bseq_close(fp[j]); free(fp); return 0; } } return fp; } int 
mm_map_file_frag(const mm_idx_t *idx, int n_segs, const char **fn, const mm_mapopt_t *opt, int n_threads) { int i, pl_threads; pipeline_t pl; if (n_segs < 1) return -1; memset(&pl, 0, sizeof(pipeline_t)); pl.n_fp = n_segs; pl.fp = open_bseqs(pl.n_fp, fn); if (pl.fp == 0) return -1; pl.opt = opt, pl.mi = idx; pl.n_threads = n_threads > 1? n_threads : 1; pl.mini_batch_size = opt->mini_batch_size; if (opt->split_prefix) pl.fp_split = mm_split_init(opt->split_prefix, idx); pl_threads = n_threads == 1? 1 : (opt->flag&MM_F_2_IO_THREADS)? 3 : 2; pl_threads = 1; //TODO: this change helped avoid seg-faults on Phoenix cluster (figure out why) //GDB was indicating seg-faults in bseq.c kt_pipeline(pl_threads, worker_pipeline, &pl, 3); free(pl.str.s); if (pl.fp_split) fclose(pl.fp_split); for (i = 0; i < pl.n_fp; ++i) mm_bseq_close(pl.fp[i]); free(pl.fp); return 0; } int mm_map_file(const mm_idx_t *idx, const char *fn, const mm_mapopt_t *opt, int n_threads) { return mm_map_file_frag(idx, 1, &fn, opt, n_threads); } int mm_split_merge(int n_segs, const char **fn, const mm_mapopt_t *opt, int n_split_idx) { int i; pipeline_t pl; mm_idx_t *mi; if (n_segs < 1 || n_split_idx < 1) return -1; memset(&pl, 0, sizeof(pipeline_t)); pl.n_fp = n_segs; pl.fp = open_bseqs(pl.n_fp, fn); if (pl.fp == 0) return -1; pl.opt = opt; pl.mini_batch_size = opt->mini_batch_size; pl.n_parts = n_split_idx; pl.fp_parts = CALLOC(FILE*, pl.n_parts); pl.rid_shift = CALLOC(uint32_t, pl.n_parts); pl.mi = mi = mm_split_merge_prep(opt->split_prefix, n_split_idx, pl.fp_parts, pl.rid_shift); if (pl.mi == 0) { free(pl.fp_parts); free(pl.rid_shift); return -1; } for (i = n_split_idx - 1; i > 0; --i) pl.rid_shift[i] = pl.rid_shift[i - 1]; for (pl.rid_shift[0] = 0, i = 1; i < n_split_idx; ++i) pl.rid_shift[i] += pl.rid_shift[i - 1]; if (opt->flag & MM_F_OUT_SAM) for (i = 0; i < (int32_t)pl.mi->n_seq; ++i) printf("@SQ\tSN:%s\tLN:%d\n", pl.mi->seq[i].name, pl.mi->seq[i].len); kt_pipeline(2, worker_pipeline, &pl, 3); 
free(pl.str.s); mm_idx_destroy(mi); free(pl.rid_shift); for (i = 0; i < n_split_idx; ++i) fclose(pl.fp_parts[i]); free(pl.fp_parts); for (i = 0; i < pl.n_fp; ++i) mm_bseq_close(pl.fp[i]); free(pl.fp); mm_split_rm_tmp(opt->split_prefix, n_split_idx); return 0; }
oskar_convert_lon_lat_to_relative_directions.c
/*
 * Copyright (c) 2013-2015, The University of Oxford
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. Neither the name of the University of Oxford nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "convert/oskar_convert_lon_lat_to_relative_directions.h"
#include "convert/oskar_convert_lon_lat_to_relative_directions_cuda.h"
#include "convert/private_convert_lon_lat_to_relative_directions_inline.h"
#include "utility/oskar_device_utils.h"
#include <math.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Single precision, 3D (l, m, n).
 *
 * Converts (lon, lat) positions to direction cosines relative to the
 * reference point (lon0, lat0), delegating the per-point maths to the
 * project-supplied inline helper.
 */
void oskar_convert_lon_lat_to_relative_directions_f(int num_points,
        const float* lon_rad, const float* lat_rad, float lon0_rad,
        float lat0_rad, float* l, float* m, float* n)
{
    int i;
    float sin_lat0, cos_lat0;
    /* Use the single-precision math functions directly, consistent with
     * the _2d_f variant below (previously computed in double and cast). */
    sin_lat0 = sinf(lat0_rad);
    cos_lat0 = cosf(lat0_rad);
#pragma omp parallel for private(i)
    for (i = 0; i < num_points; ++i)
    {
        oskar_convert_lon_lat_to_relative_directions_inline_f(
                lon_rad[i], lat_rad[i], lon0_rad, cos_lat0, sin_lat0,
                &l[i], &m[i], &n[i]);
    }
}

/*
 * Double precision, 3D (l, m, n).
 */
void oskar_convert_lon_lat_to_relative_directions_d(int num_points,
        const double* lon_rad, const double* lat_rad, double lon0_rad,
        double lat0_rad, double* l, double* m, double* n)
{
    int i;
    double sin_lat0, cos_lat0;
    sin_lat0 = sin(lat0_rad);
    cos_lat0 = cos(lat0_rad);
#pragma omp parallel for private(i)
    for (i = 0; i < num_points; ++i)
    {
        oskar_convert_lon_lat_to_relative_directions_inline_d(
                lon_rad[i], lat_rad[i], lon0_rad, cos_lat0, sin_lat0,
                &l[i], &m[i], &n[i]);
    }
}

/*
 * Single precision, 2D (l, m only; n is not computed).
 *
 * The per-point conversion is written out inline here:
 *   l = cos(lat) * sin(lon - lon0)
 *   m = cos(lat0) * sin(lat) - sin(lat0) * cos(lat) * cos(lon - lon0)
 */
void oskar_convert_lon_lat_to_relative_directions_2d_f(int num_points,
        const float* lon_rad, const float* lat_rad, float lon0_rad,
        float lat0_rad, float* l, float* m)
{
    int i;
    float sin_lat0, cos_lat0;
    sin_lat0 = sinf(lat0_rad);
    cos_lat0 = cosf(lat0_rad);
#pragma omp parallel for private(i)
    for (i = 0; i < num_points; ++i)
    {
        float cos_lat, sin_lat, sin_lon, cos_lon, rel_lon, p_lat, l_, m_;
        p_lat = lat_rad[i];
        rel_lon = lon_rad[i];
        rel_lon -= lon0_rad;
        sin_lon = sinf(rel_lon);
        cos_lon = cosf(rel_lon);
        sin_lat = sinf(p_lat);
        cos_lat = cosf(p_lat);
        l_ = cos_lat * sin_lon;
        m_ = cos_lat0 * sin_lat - sin_lat0 * cos_lat * cos_lon;
        l[i] = l_;
        m[i] = m_;
    }
}

/*
 * Double precision, 2D (l, m only; n is not computed).
 */
void oskar_convert_lon_lat_to_relative_directions_2d_d(int num_points,
        const double* lon_rad, const double* lat_rad, double lon0_rad,
        double lat0_rad, double* l, double* m)
{
    int i;
    double sin_lat0, cos_lat0;
    sin_lat0 = sin(lat0_rad);
    cos_lat0 = cos(lat0_rad);
#pragma omp parallel for private(i)
    for (i = 0; i < num_points; ++i)
    {
        double cos_lat, sin_lat, sin_lon, cos_lon, rel_lon, p_lat, l_, m_;
        p_lat = lat_rad[i];
        rel_lon = lon_rad[i];
        rel_lon -= lon0_rad;
        sin_lon = sin(rel_lon);
        cos_lon = cos(rel_lon);
        sin_lat = sin(p_lat);
        cos_lat = cos(p_lat);
        l_ = cos_lat * sin_lon;
        m_ = cos_lat0 * sin_lat - sin_lat0 * cos_lat * cos_lon;
        l[i] = l_;
        m[i] = m_;
    }
}

/*
 * Wrapper: dispatches on oskar_Mem type (single/double) and location
 * (CPU/GPU) after validating the inputs. On error, sets *status and
 * returns without touching the outputs (beyond any reallocation already
 * performed). Output arrays l, m, n are grown to num_points if needed.
 */
void oskar_convert_lon_lat_to_relative_directions(int num_points,
        const oskar_Mem* lon_rad, const oskar_Mem* lat_rad, double lon0_rad,
        double lat0_rad, oskar_Mem* l, oskar_Mem* m, oskar_Mem* n,
        int* status)
{
    int type, location;

    /* Check if safe to proceed. */
    if (*status) return;

    /* Get the meta-data. */
    type = oskar_mem_type(lon_rad);
    location = oskar_mem_location(lon_rad);

    /* Check type consistency: all arrays must share the input type. */
    if (oskar_mem_type(lat_rad) != type || oskar_mem_type(l) != type ||
            oskar_mem_type(m) != type || oskar_mem_type(n) != type)
    {
        *status = OSKAR_ERR_TYPE_MISMATCH;
        return;
    }
    if (type != OSKAR_SINGLE && type != OSKAR_DOUBLE)
    {
        *status = OSKAR_ERR_BAD_DATA_TYPE;
        return;
    }

    /* Check location consistency: all arrays must live in the same place. */
    if (oskar_mem_location(lat_rad) != location ||
            oskar_mem_location(l) != location ||
            oskar_mem_location(m) != location ||
            oskar_mem_location(n) != location)
    {
        *status = OSKAR_ERR_LOCATION_MISMATCH;
        return;
    }

    /* Check memory is allocated. */
    if (!oskar_mem_allocated(lon_rad) || !oskar_mem_allocated(lat_rad))
    {
        *status = OSKAR_ERR_MEMORY_NOT_ALLOCATED;
        return;
    }

    /* Check dimensions. */
    if ((int)oskar_mem_length(lon_rad) < num_points ||
            (int)oskar_mem_length(lat_rad) < num_points)
    {
        *status = OSKAR_ERR_DIMENSION_MISMATCH;
        return;
    }

    /* Resize output arrays if needed. */
    if ((int)oskar_mem_length(l) < num_points)
        oskar_mem_realloc(l, num_points, status);
    if ((int)oskar_mem_length(m) < num_points)
        oskar_mem_realloc(m, num_points, status);
    if ((int)oskar_mem_length(n) < num_points)
        oskar_mem_realloc(n, num_points, status);

    /* Check if safe to proceed (reallocation may have failed). */
    if (*status) return;

    /* Convert coordinates. */
    if (type == OSKAR_SINGLE)
    {
        const float *lon_, *lat_;
        float *l_, *m_, *n_;
        lon_ = oskar_mem_float_const(lon_rad, status);
        lat_ = oskar_mem_float_const(lat_rad, status);
        l_ = oskar_mem_float(l, status);
        m_ = oskar_mem_float(m, status);
        n_ = oskar_mem_float(n, status);

        if (location == OSKAR_GPU)
        {
#ifdef OSKAR_HAVE_CUDA
            oskar_convert_lon_lat_to_relative_directions_cuda_f(
                    num_points, lon_, lat_, (float)lon0_rad,
                    (float)lat0_rad, l_, m_, n_);
            oskar_device_check_error(status);
#else
            *status = OSKAR_ERR_CUDA_NOT_AVAILABLE;
#endif
        }
        else
        {
            oskar_convert_lon_lat_to_relative_directions_f(
                    num_points, lon_, lat_, (float)lon0_rad,
                    (float)lat0_rad, l_, m_, n_);
        }
    }
    else
    {
        const double *lon_, *lat_;
        double *l_, *m_, *n_;
        lon_ = oskar_mem_double_const(lon_rad, status);
        lat_ = oskar_mem_double_const(lat_rad, status);
        l_ = oskar_mem_double(l, status);
        m_ = oskar_mem_double(m, status);
        n_ = oskar_mem_double(n, status);

        if (location == OSKAR_GPU)
        {
#ifdef OSKAR_HAVE_CUDA
            oskar_convert_lon_lat_to_relative_directions_cuda_d(
                    num_points, lon_, lat_, lon0_rad, lat0_rad, l_, m_, n_);
            oskar_device_check_error(status);
#else
            *status = OSKAR_ERR_CUDA_NOT_AVAILABLE;
#endif
        }
        else
        {
            oskar_convert_lon_lat_to_relative_directions_d(
                    num_points, lon_, lat_, lon0_rad, lat0_rad, l_, m_, n_);
        }
    }
}

#ifdef __cplusplus
}
#endif
test.c
#include <barrelfish/barrelfish.h>
#include <omp.h>

#define ITERATIONS 10000

/*
 * Smoke test for the Barrelfish BOMP (OpenMP) runtime.
 *
 * Runs three `#pragma omp parallel for` loops: two flat loops over
 * ITERATIONS iterations, and one nested pair of parallel loops, and
 * asserts that a shared counter reaches the expected total each time.
 *
 * Fix: `counter++` in the first two parallel loops was an unsynchronized
 * read-modify-write of a shared variable (a data race — the asserts could
 * fail nondeterministically).  The third loop already used
 * __sync_fetch_and_add; the first two now do the same.
 */
int main(int argc, char *argv[])
{
    debug_printf("Bomp New Test started\n");
    debug_printf("==========================\n");

    bomp_init(BOMP_THREADS_ALL);

    debug_printf("==========================\n");
    debug_printf("==========================\n");

    /* NOTE(review): array has 10 slots but is indexed by
     * omp_get_thread_num(); this assumes BOMP_THREADS_ALL yields at most
     * 10 threads — confirm, otherwise this writes out of bounds. */
    uint32_t array[10];
    memset(array, 0, sizeof(array));

    uint64_t counter = 0;

#pragma omp parallel for
    for (uint32_t i = 0; i < ITERATIONS; ++i) {
        /* Per-thread slot: no two threads share an index here. */
        array[omp_get_thread_num()]++;
        if ((i % 5000) == 0) {
            debug_printf("loop %u\n", i);
        }
        /* counter is shared across the team: update atomically. */
        __sync_fetch_and_add(&counter, 1);
    }

    assert(counter == ITERATIONS);

    debug_printf("array: %u %u %u %u", array[0], array[1], array[2], array[3]);

    debug_printf("==========================\n");
    debug_printf("==========================\n");

    counter = 0;
    memset(array, 0, sizeof(array));

#pragma omp parallel for
    for (uint32_t i = 0; i < ITERATIONS; ++i) {
        array[omp_get_thread_num()]++;
        if ((i % 5000) == 0) {
            debug_printf("loop %u\n", i);
        }
        /* Same race fix as the first loop. */
        __sync_fetch_and_add(&counter, 1);
    }

    debug_printf("array: %u %u %u %u", array[0], array[1], array[2], array[3]);
    assert(counter == ITERATIONS);

    debug_printf("==========================\n");
    debug_printf("==========================\n");

    counter = 0;
    memset(array, 0, sizeof(array));

    /* Nested parallelism: inner teams run concurrently, so the counter
     * update was already atomic here in the original. */
#pragma omp parallel for
    for (uint32_t i = 0; i < 12; ++i) {
#pragma omp parallel for
        for (uint32_t j = 0; j < 10; ++j) {
            debug_printf("loop %u.%u\n", i, j);
            array[omp_get_thread_num()]++;
            __sync_fetch_and_add(&counter, 1);
        }
    }

    debug_printf("array: %u %u %u %u", array[0], array[1], array[2], array[3]);
    if (counter != 120) {
        debug_printf("%lu %u\n\n", counter, 120);
    }

    /* NOTE(review): this spin loop makes the assert and the shutdown
     * messages below unreachable — presumably leftover debugging; kept
     * to preserve existing behavior, but worth confirming. */
    while (1)
        ;

    assert(counter == 120);

    debug_printf("==========================\n");
    debug_printf("==========================\n");
    debug_printf("==========================\n");

    debug_printf("Bomp New Test terminated\n");

    while (1)
        ;

    return 0;
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
opt-record-1.c
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -target-cpu x86-64 %s -O3 -opt-record-file=t1.opt -fopenmp -emit-llvm-bc -o %t.bc // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -target-cpu x86-64 -O3 -x ir %t.bc -opt-record-file %t.opt -fopenmp -emit-obj // RUN: cat %t.opt | FileCheck -check-prefix=CHECK %s // REQUIRES: x86-registered-target void foo(int *a, int *b, int *c) { #pragma omp parallel for for (int i = 0; i < 100; i++) { a[i] = b[i] + c[i]; } } // CHECK: --- !Missed // CHECK: Pass: inline // CHECK: Name: NoDefinition // CHECK: Function: foo
DRB008-indirectaccess4-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Two pointers have a distance of 12 (xa2 - xa1 = 12).
   They are used as base addresses for indirect array accesses using an
   index set (another array).

   The index set has two indices with distance of 12 :
   indexSet[1]- indexSet[0] = 533 - 521 = 12
   So xa1[idx] and xa2[idx] may cause loop carried dependence for N=0 and
   N=3.

   We use the default loop scheduling (static even) in OpenMP.
   It is possible that two dependent iterations will be scheduled within a
   same chunk to a same thread.  So there is no runtime data races.

   N is 180, two iteraions with N=0 and N= 1 have loop carried dependences.
   For static even scheduling, we must have at least 180 threads
   (180/180=1 iterations) so iteration 0 and 1 will be scheduled to two
   different threads.
   Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5

   NOTE(review): this is a DataRaceBench benchmark — the race below is
   INTENTIONAL (the file name says "-yes").  Do not "fix" it; race
   detectors are expected to flag it. */
#include "omprace.h"
#include <omp.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define N 180

/* Indices into `base`; note indexSet[1]-indexSet[0] == 12, matching the
 * xa2-xa1 pointer offset, which is what creates the aliasing pair. */
int indexSet[N] = {
    521, 533, 525, 527, 529, 531, // 521+12=533
    547, 549, 551, 553, 555, 557,
    573, 575, 577, 579, 581, 583,
    599, 601, 603, 605, 607, 609,
    625, 627, 629, 631, 633, 635,
    651, 653, 655, 657, 659, 661,

    859, 861, 863, 865, 867, 869,
    885, 887, 889, 891, 893, 895,
    911, 913, 915, 917, 919, 921,
    937, 939, 941, 943, 945, 947,
    963, 965, 967, 969, 971, 973,
    989, 991, 993, 995, 997, 999,

    1197, 1199, 1201, 1203, 1205, 1207,
    1223, 1225, 1227, 1229, 1231, 1233,
    1249, 1251, 1253, 1255, 1257, 1259,
    1275, 1277, 1279, 1281, 1283, 1285,
    1301, 1303, 1305, 1307, 1309, 1311,
    1327, 1329, 1331, 1333, 1335, 1337,

    1535, 1537, 1539, 1541, 1543, 1545,
    1561, 1563, 1565, 1567, 1569, 1571,
    1587, 1589, 1591, 1593, 1595, 1597,
    1613, 1615, 1617, 1619, 1621, 1623,
    1639, 1641, 1643, 1645, 1647, 1649,
    1665, 1667, 1669, 1671, 1673, 1675,

    1873, 1875, 1877, 1879, 1881, 1883,
    1899, 1901, 1903, 1905, 1907, 1909,
    1925, 1927, 1929, 1931, 1933, 1935,
    1951, 1953, 1955, 1957, 1959, 1961,
    1977, 1979, 1981, 1983, 1985, 1987,
    2003, 2005, 2007, 2009, 2011, 2013};

int main (int argc, char* argv[])
{
  omprace_init();

  /* Buffer large enough for the highest index (2013) plus the +12 alias
   * offset used by xa2. */
  double * base = (double*) malloc(sizeof(double)* (2013+12+1));
  if (base == 0)
  {
    printf ("Error in malloc(). Aborting ...\n");
    return 1;
  }

  /* Aliasing pair: xa2 points 12 doubles into xa1's data, so
   * xa1[i+12] and xa2[i] are the same element. */
  double * xa1 = base;
  double * xa2 = xa1 + 12;
  int i;

  // initialize segments touched by indexSet
  for (i =521; i<= 2025; ++i)
  {
    base[i]=0.5*i;
  }

  /* The intentional data race: dynamic,1 scheduling maximizes the chance
   * that the dependent iterations land on different threads. */
  //#pragma omp parallel for // default static even scheduling may not trigger data race!
#pragma omp parallel for schedule(dynamic,1)// default static even scheduling may not trigger data race!
  for (i =0; i< N; ++i)
  {
    int idx = indexSet[i];
    xa1[idx]+= 1.0;
    xa2[idx]+= 3.0;
  }

  printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
  free (base);
  omprace_fini();
  return 0;
}
pbkdf2-hmac-sha512_fmt_plug.c
/* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Based on hmac-sha512 by magnum
 *
 * Minor fixes, format unification and OMP support done by Dhiru Kholia
 * <dhiru@openwall.com>
 *
 * Fixed for supporting $ml$ "dave" format as well as GRUB native format by
 * magnum 2013. Note: We support a binary size of >512 bits (64 bytes / 128
 * chars of hex) but we currently do not calculate it even in cmp_exact(). The
 * chance for a 512-bit hash collision should be pretty dang slim.
 */

/* John the Ripper "dynamic plugin" stanza: the same file is included three
 * times by the format registry with different macros defined. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_sha512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_sha512);
#else

#include <ctype.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>

#include "misc.h"
#include "arch.h"
#include "common.h"
#include "formats.h"
#include "sha2.h"
#include "johnswap.h"
#include "pbkdf2_hmac_common.h"
#include "pbkdf2_hmac_sha512.h"

#define FORMAT_LABEL            "PBKDF2-HMAC-SHA512"
#undef FORMAT_NAME
#define FORMAT_NAME             "GRUB2 / OS X 10.8+"

/* Algorithm banner depends on whether a SIMD PBKDF2 core is available. */
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME          "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME          "PBKDF2-SHA512 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME          "PBKDF2-SHA512 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif

#define SALT_SIZE               sizeof(struct custom_salt)

/* With SIMD, keys are processed in groups of SSE_GROUP_SZ_SHA512. */
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA512
#define MAX_KEYS_PER_CRYPT      SSE_GROUP_SZ_SHA512
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               1
#endif
#endif

#include "memdbg.h"

#define PAD_SIZE                128
#define PLAINTEXT_LENGTH        125

/* Parsed salt: raw salt bytes plus the PBKDF2 iteration count. */
static struct custom_salt {
	uint8_t length;
	uint8_t salt[PBKDF2_64_MAX_SALT_SIZE];
	uint32_t rounds;
} *cur_salt;

/* Per-candidate buffers, sized in init() for max_keys_per_crypt. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[PBKDF2_SHA512_BINARY_SIZE / sizeof(uint32_t)];

/* Allocate key/output buffers; under OpenMP, scale the per-crypt key
 * counts by thread count (standard JtR pattern). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
	                       self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out),
	                       self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Parse "<tag><rounds><delim><hex-salt><delim>..." into a custom_salt.
 * The delimiter is '.' if present, otherwise '$' (the two on-disk
 * variants this format accepts).  Returns a pointer to static storage. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *p;
	int saltlen;
	char delim;

	memset(&cs, 0, sizeof(cs));
	ciphertext += PBKDF2_SHA512_TAG_LEN;
	cs.rounds = atou(ciphertext);
	delim = strchr(ciphertext, '.') ? '.' : '$';
	ciphertext = strchr(ciphertext, delim) + 1;
	p = strchr(ciphertext, delim);
	saltlen = 0;
	while (ciphertext < p) {        /** extract salt (hex -> bytes) **/
		cs.salt[saltlen++] =
			atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
			atoi16[ARCH_INDEX(ciphertext[1])];
		ciphertext += 2;
	}
	cs.length = saltlen;

	return (void *)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Hash-table bucket selectors over the first 32 bits of the result. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Derive PBKDF2-HMAC-SHA512 for every queued candidate key, in parallel
 * across candidates, SIMD-grouped when available. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
#ifdef SSE_GROUP_SZ_SHA512
		/* SIMD path: gather a group of keys and derive them at once. */
		int lens[SSE_GROUP_SZ_SHA512], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA512];
		union {
			uint32_t *pout[SSE_GROUP_SZ_SHA512];
			unsigned char *poutc;
		} x;
		for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = crypt_out[index+i];
		}
		pbkdf2_sha512_sse((const unsigned char **)pin, lens,
			cur_salt->salt, cur_salt->length,
			cur_salt->rounds, &(x.poutc),
			PBKDF2_SHA512_BINARY_SIZE, 0);
#else
		/* Scalar path: one key at a time. */
		pbkdf2_sha512((const unsigned char*)(saved_key[index]),
			strlen(saved_key[index]),
			cur_salt->salt, cur_salt->length,
			cur_salt->rounds, (unsigned char*)crypt_out[index],
			PBKDF2_SHA512_BINARY_SIZE, 0);
#endif
	}
	return count;
}

/* Quick screen: compare only the first ARCH_SIZE bytes; cmp_one/cmp_exact
 * do the full check. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], PBKDF2_SHA512_BINARY_SIZE);
}

/* Store a candidate password, truncated to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Full recomputation against the source ciphertext (handles binaries
 * longer than what crypt_all produced). */
static int cmp_exact(char *source, int index)
{
	return pbkdf2_hmac_sha512_cmp_exact(get_key(index), source,
	    cur_salt->salt, cur_salt->length, cur_salt->rounds);
}

/* Tunable-cost reporting hook ("iteration count" below). */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->rounds;
}

/* Format descriptor: field order is fixed by struct fmt_main's ABI —
 * do not reorder entries. */
struct fmt_main fmt_pbkdf2_hmac_sha512 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		PBKDF2_SHA512_BINARY_SIZE,
		sizeof(uint32_t),
		SALT_SIZE,
		sizeof(ARCH_WORD),
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
		{
			"iteration count",
		},
		{ PBKDF2_SHA512_FORMAT_TAG, FORMAT_TAG_ML, FORMAT_TAG_GRUB },
		pbkdf2_hmac_sha512_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		pbkdf2_hmac_sha512_prepare,
		pbkdf2_hmac_sha512_valid,
		pbkdf2_hmac_sha512_split,
		pbkdf2_hmac_sha512_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
parallel_partition.h
#ifndef AQSORT_IMPL_PARALLEL_PARTITION_H
#define AQSORT_IMPL_PARALLEL_PARTITION_H

#include <omp.h>

#include <cassert>
#include <cstddef>

#include "sequential_partition.h"

namespace aqsort
{
    namespace impl
    {
        // alias for block size
        // (AQSORT_PARALLEL_PARTITION_BLOCK_SIZE is assumed to be defined
        //  by the build or an including header -- not visible here)
        static const std::size_t B = AQSORT_PARALLEL_PARTITION_BLOCK_SIZE;

        /*
         * Block-based parallel partition of [start, start + n) around the
         * element at index `pivot`, using P OpenMP threads and the
         * caller-supplied comparison/swap functors (element access is
         * indirect: (*comp)(i, pivot), (*swap)(i, j) act on indices).
         *
         * Returns the number of elements that compare less than the pivot,
         * i.e. the final split offset relative to `start`.
         *
         * Algorithm sketch (Frias/Petit-style blocked partition):
         *   1. Each thread sequentially partitions its own stripe of
         *      whole B-sized blocks.
         *   2. A single thread "neutralizes" the partially-filled (grey)
         *      boundary blocks and computes `less_than`.
         *   3. All threads cooperatively swap whole misplaced blocks,
         *      claiming work inside a critical section.
         *   4. A single thread fixes up the tail remainder (< B*P elems).
         */
        template<typename Comp, typename Swap>
        std::size_t parallel_partition(const std::size_t start, const std::size_t n,
                const std::size_t pivot, Comp* const comp, Swap* const swap, const std::size_t P)
        {
            // avoid unnecessary calling performance penalty
            assert (n > 0);
            // do not call for single thread only
            assert (P > 1);

            // number of blocks
            const std::size_t m = n / B;
            // must not be called for small number of elements
            assert (m >= P);

            // auxiliary data
            // NOTE: threads_temp is a variable-length array (P is not a
            // constant) -- relies on a compiler extension in C++.
            // Layout: [0, P)      = threads_left (per-thread split cursor)
            //         [P, 2P]     = threads_start (stripe boundaries)
            // threads_end aliases threads_start + 1, so
            // threads_end[p] == threads_start[p + 1] (intentional).
            std::size_t threads_temp[2 * P + 1];
            std::size_t* const threads_left  = threads_temp;
            std::size_t* const threads_start = threads_temp + P;
            std::size_t* const threads_end   = threads_temp + P + 1;

            // stripe p covers blocks [p*m/P, (p+1)*m/P); only whole blocks
            for (std::size_t p = 0; p < P; p++)
                threads_start[p] = start + B * (p * m / P);
            threads_start[P] = start + B * m;

            // auxiliary data (shared between threads)
            std::size_t less_than;
            std::size_t left_thread, right_thread;
            std::size_t global_left, global_right;

            // must be called by single thread only
#pragma omp parallel num_threads(P) default(none) \
            firstprivate(comp, swap, threads_left, threads_start, threads_end) \
            shared(less_than, left_thread, right_thread, global_left, global_right)
            {
                const std::size_t p = omp_get_thread_num();

                // partitioning thread parts: each thread partitions its
                // own stripe sequentially; threads_left[p] ends up at the
                // stripe's split point.
                std::size_t left  = threads_start[p];
                std::size_t right = threads_start[p + 1] - 1;
                assert(right >= left);

                std::size_t seq_n = right - left + 1;
                threads_left[p] = left;
                if (seq_n > 0)
                    threads_left[p] += sequential_partition(left, seq_n, pivot, comp, swap);

                // neutralize grey blocks: a "grey" block is the block that
                // a stripe's split point falls inside (mixed contents)
#pragma omp barrier
#pragma omp single
                {
                    std::size_t i = 0;
                    std::size_t j = P - 1;
                    while (i < j) {
                        // check if no grey blocks
                        std::size_t mod_i = (threads_left[i] - start) % B;
                        std::size_t mod_j = (threads_left[j] - start) % B;

                        if (mod_i == 0) { i++; continue; }
                        if (mod_j == 0) { j--; continue; }

                        // pair up elements from the left grey block with
                        // elements from the right grey block
                        std::size_t i_last  = threads_left[i] - mod_i + B - 1;
                        std::size_t j_first = threads_left[j] - mod_j;

                        while ((threads_left[i] <= i_last) && (threads_left[j] - 1 >= j_first)) {
                            (*swap)(threads_left[i], threads_left[j] - 1);
                            threads_left[i]++;
                            threads_left[j]--;
                        }
                    }

                    // find pivot position: total count of "less than pivot"
                    less_than = 0;
                    for (std::size_t k = 0; k < P; k++)
                        less_than += threads_left[k] - threads_start[k];

                    // place the remaining grey block to its final position
                    // if exists and not already in place
                    std::size_t temp = (threads_left[i] - start) % B;
                    if ((temp != 0) &&
                            ((start + less_than < threads_left[i] - temp) ||
                             (start + less_than >= threads_left[i] - temp + B))) {
                        if ((*comp)(start + less_than, pivot)) {
                            std::size_t count = B - temp;
                            while (count > 0) {
                                (*swap)(threads_left[i], start + less_than + count - 1);
                                threads_left[i]++;
                                count--;
                            }
                        }
                        else {
                            std::size_t count = temp;
                            while (count > 0) {
                                (*swap)(threads_left[i] - 1, start + less_than - count);
                                threads_left[i]--;
                                count--;
                            }
                        }
                    }

                    // initialize shared cursors for the block-swap phase
                    left_thread = 0;
                    right_thread = P - 1;
                    global_left  = threads_left[0];
                    global_right = threads_left[P - 1];
                } // implicit barrier

                // swap misplaced blocks in parallel: threads claim one
                // (left, right) block pair at a time under the critical
                // section, then swap the pair without holding the lock.
                bool done = false;
                while (true) {
                    std::size_t my_left, my_right;

                    // find next blocks to be swapped
#pragma omp critical
                    {
                        if ((global_left >= global_right) ||
                                (global_left >= (start + less_than)) ||
                                (global_right - B < (start + less_than))) {
                            done = true;
                        }
                        else {
                            // advance the left cursor past exhausted stripes
                            while (global_left >= threads_end[left_thread]) {
                                if (++left_thread >= P) { done = true; break; }
                                global_left = threads_left[left_thread];
                            }
                            my_left = global_left;
                            global_left += B;

                            // check underflow
                            while ((global_right > B) && (global_right - B < threads_start[right_thread])) {
                                if (right_thread == 0) { done = true; break; }
                                global_right = threads_left[--right_thread];
                            }
                            if (global_right <= B) {
                                done = true;
                            }
                            else {
                                my_right = global_right - B;
                                global_right -= B;
                            }
                        }
                    }

                    // NOTE: my_left/my_right are only read when done is
                    // false, in which case both were assigned above.
                    if (done)
                        break;

                    // swap blocks
                    if (my_left < my_right)
                        for (std::size_t k = 0; k < B; k++)
                            (*swap)(my_left + k, my_right + k);
                }

#pragma omp barrier

                // process the remainder: the tail past the last whole
                // block, partitioned sequentially into place
#pragma omp single
                {
                    for (std::size_t k = threads_end[P - 1]; k < start + n; k++) {
                        if ((*comp)(k, pivot)) {
                            (*swap)(k, start + less_than);
                            less_than++;
                        }
                    }
                }
            } // end of parallel region

            return less_than;
        }
    }
}

#endif
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class AddrLabelExpr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class Stmt; /// The statement class. unsigned sClass : 8; /// This bit is set only for the Stmts that are the structured-block of /// OpenMP executable directives. Directives that have a structured block /// are called "non-standalone" directives. /// I.e. those returned by OMPExecutableDirective::getStructuredBlock(). unsigned IsOMPStructuredBlock : 1; }; enum { NumStmtBits = 9 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". 
SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". 
SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. 
SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = NumStmtBits + 9 }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. 
SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; unsigned NonOdrUseReason : 2; /// The location of the declaration name itself. SourceLocation Loc; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. Only values of 1, 2, /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. unsigned CharByteWidth : 3; unsigned IsPascal : 1; /// The number of concatenated token this string is made of. /// This is the number of trailing SourceLocation. unsigned NumConcatenated; }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; class UnaryOperatorBitfields { friend class UnaryOperator; unsigned : NumExprBits; unsigned Opc : 5; unsigned CanOverflow : 1; SourceLocation Loc; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; unsigned IsType : 1; // true if operand is a type, false if an expression. 
};

/// Bitfields and the location of the ']' for an ArraySubscriptExpr.
class ArraySubscriptExprBitfields {
  friend class ArraySubscriptExpr;

  unsigned : NumExprBits;

  SourceLocation RBracketLoc;
};

class CallExprBitfields {
  friend class CallExpr;

  unsigned : NumExprBits;

  // NOTE(review): 1-bit count of "pre-arguments" stored before the regular
  // call arguments; presumably only 0 or 1 are representable — confirm
  // against CallExpr's trailing-object layout.
  unsigned NumPreArgs : 1;

  /// True if the callee of the call expression was found using ADL.
  unsigned UsesADL : 1;

  /// Padding used to align OffsetToTrailingObjects to a byte multiple.
  unsigned : 24 - 2 - NumExprBits;

  /// The offset in bytes from the this pointer to the start of the
  /// trailing objects belonging to CallExpr. Intentionally byte sized
  /// for faster access.
  unsigned OffsetToTrailingObjects : 8;
};
// Total bits consumed by CallExprBitfields; derived bitfield classes
// (e.g. CXXOperatorCallExprBitfields) reserve this many bits of padding.
enum { NumCallExprBits = 32 };

class MemberExprBitfields {
  friend class ASTStmtReader;
  friend class MemberExpr;

  unsigned : NumExprBits;

  /// IsArrow - True if this is "X->F", false if this is "X.F".
  unsigned IsArrow : 1;

  /// True if this member expression used a nested-name-specifier to
  /// refer to the member, e.g., "x->Base::f", or found its member via
  /// a using declaration.  When true, a MemberExprNameQualifier
  /// structure is allocated immediately after the MemberExpr.
  unsigned HasQualifierOrFoundDecl : 1;

  /// True if this member expression specified a template keyword
  /// and/or a template argument list explicitly, e.g., x->f<int>,
  /// x->template f, x->template f<int>.
  /// When true, an ASTTemplateKWAndArgsInfo structure and its
  /// TemplateArguments (if any) are present.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// True if this member expression refers to a method that
  /// was resolved from an overloaded set having size greater than 1.
  unsigned HadMultipleCandidates : 1;

  /// Value of type NonOdrUseReason indicating why this MemberExpr does
  /// not constitute an odr-use of the named declaration. Meaningful only
  /// when naming a static member.
  unsigned NonOdrUseReason : 2;

  /// This is the location of the -> or . in the expression.
  SourceLocation OperatorLoc;
};

class CastExprBitfields {
  friend class CastExpr;
  friend class ImplicitCastExpr;

  unsigned : NumExprBits;

  // NOTE(review): presumably holds a CastKind enumerator — confirm.
  unsigned Kind : 6;
  unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.

  /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
  /// here. ([implimits] Direct and indirect base classes [16384]).
  unsigned BasePathSize;
};

class BinaryOperatorBitfields {
  friend class BinaryOperator;

  unsigned : NumExprBits;

  // NOTE(review): presumably a BinaryOperatorKind opcode — confirm.
  unsigned Opc : 6;

  /// This is only meaningful for operations on floating point
  /// types and 0 otherwise.
  unsigned FPFeatures : 3;

  SourceLocation OpLoc;
};

class InitListExprBitfields {
  friend class InitListExpr;

  unsigned : NumExprBits;

  /// Whether this initializer list originally had a GNU array-range
  /// designator in it. This is a temporary marker used by CodeGen.
  unsigned HadArrayRangeDesignator : 1;
};

class ParenListExprBitfields {
  friend class ASTStmtReader;
  friend class ParenListExpr;

  unsigned : NumExprBits;

  /// The number of expressions in the paren list.
  unsigned NumExprs;
};

class GenericSelectionExprBitfields {
  friend class ASTStmtReader;
  friend class GenericSelectionExpr;

  unsigned : NumExprBits;

  /// The location of the "_Generic".
  SourceLocation GenericLoc;
};

class PseudoObjectExprBitfields {
  friend class ASTStmtReader; // deserialization
  friend class PseudoObjectExpr;

  unsigned : NumExprBits;

  // These don't need to be particularly wide, because they're
  // strictly limited by the forms of expressions we permit.
  unsigned NumSubExprs : 8;
  unsigned ResultIndex : 32 - 8 - NumExprBits;
};

class SourceLocExprBitfields {
  friend class ASTStmtReader;
  friend class SourceLocExpr;

  unsigned : NumExprBits;

  /// The kind of source location builtin represented by the SourceLocExpr.
  /// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
  unsigned Kind : 2;
};

//===--- C++ Expression bitfields classes ---===//

class CXXOperatorCallExprBitfields {
  friend class ASTStmtReader;
  friend class CXXOperatorCallExpr;

  // Reserve space for the bits of the CallExprBitfields base layout.
  unsigned : NumCallExprBits;

  /// The kind of this overloaded operator. One of the enumerator
  /// value of OverloadedOperatorKind.
  unsigned OperatorKind : 6;

  // Only meaningful for floating point types.
  unsigned FPFeatures : 3;
};

class CXXBoolLiteralExprBitfields {
  friend class CXXBoolLiteralExpr;

  unsigned : NumExprBits;

  /// The value of the boolean literal.
  unsigned Value : 1;

  /// The location of the boolean literal.
  SourceLocation Loc;
};

class CXXNullPtrLiteralExprBitfields {
  friend class CXXNullPtrLiteralExpr;

  unsigned : NumExprBits;

  /// The location of the null pointer literal.
  SourceLocation Loc;
};

class CXXThisExprBitfields {
  friend class CXXThisExpr;

  unsigned : NumExprBits;

  /// Whether this is an implicit "this".
  unsigned IsImplicit : 1;

  /// The location of the "this".
  SourceLocation Loc;
};

class CXXThrowExprBitfields {
  friend class ASTStmtReader;
  friend class CXXThrowExpr;

  unsigned : NumExprBits;

  /// Whether the thrown variable (if any) is in scope.
  unsigned IsThrownVariableInScope : 1;

  /// The location of the "throw".
  SourceLocation ThrowLoc;
};

class CXXDefaultArgExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDefaultArgExpr;

  unsigned : NumExprBits;

  /// The location where the default argument expression was used.
  SourceLocation Loc;
};

class CXXDefaultInitExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDefaultInitExpr;

  unsigned : NumExprBits;

  /// The location where the default initializer expression was used.
  SourceLocation Loc;
};

class CXXScalarValueInitExprBitfields {
  friend class ASTStmtReader;
  friend class CXXScalarValueInitExpr;

  unsigned : NumExprBits;

  /// The location of the closing ')'.
  SourceLocation RParenLoc;
};

class CXXNewExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class CXXNewExpr;

  unsigned : NumExprBits;

  /// Was the usage ::new, i.e. is the global new to be used?
  unsigned IsGlobalNew : 1;

  /// Do we allocate an array? If so, the first trailing "Stmt *" is the
  /// size expression.
  unsigned IsArray : 1;

  /// Should the alignment be passed to the allocation function?
  unsigned ShouldPassAlignment : 1;

  /// If this is an array allocation, does the usual deallocation
  /// function for the allocated type want to know the allocated size?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// What kind of initializer do we have? Could be none, parens, or braces.
  /// In storage, we distinguish between "none, and no initializer expr", and
  /// "none, but an implicit initializer expr".
  unsigned StoredInitializationStyle : 2;

  /// True if the allocated type was expressed as a parenthesized type-id.
  unsigned IsParenTypeId : 1;

  /// The number of placement new arguments.
  unsigned NumPlacementArgs;
};

class CXXDeleteExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDeleteExpr;

  unsigned : NumExprBits;

  /// Is this a forced global delete, i.e. "::delete"?
  unsigned GlobalDelete : 1;

  /// Is this the array form of delete, i.e. "delete[]"?
  unsigned ArrayForm : 1;

  /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
  /// applied to pointer-to-array type (ArrayFormAsWritten will be false
  /// while ArrayForm will be true).
  unsigned ArrayFormAsWritten : 1;

  /// Does the usual deallocation function for the element type require
  /// a size_t argument?
  unsigned UsualArrayDeleteWantsSize : 1;

  /// Location of the expression.
  SourceLocation Loc;
};

class TypeTraitExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class TypeTraitExpr;

  unsigned : NumExprBits;

  /// The kind of type trait, which is a value of a TypeTrait enumerator.
  unsigned Kind : 8;

  /// If this expression is not value-dependent, this indicates whether
  /// the trait evaluated true or false.
  unsigned Value : 1;

  /// The number of arguments to this type trait.
  unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};

class DependentScopeDeclRefExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class DependentScopeDeclRefExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;
};

class CXXConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXConstructExpr;

  unsigned : NumExprBits;

  unsigned Elidable : 1;
  unsigned HadMultipleCandidates : 1;
  unsigned ListInitialization : 1;
  unsigned StdInitListInitialization : 1;
  unsigned ZeroInitialization : 1;
  unsigned ConstructionKind : 3;

  SourceLocation Loc;
};

class ExprWithCleanupsBitfields {
  friend class ASTStmtReader; // deserialization
  friend class ExprWithCleanups;

  unsigned : NumExprBits;

  // When false, it must not have side effects.
  unsigned CleanupsHaveSideEffects : 1;

  unsigned NumObjects : 32 - 1 - NumExprBits;
};

class CXXUnresolvedConstructExprBitfields {
  friend class ASTStmtReader;
  friend class CXXUnresolvedConstructExpr;

  unsigned : NumExprBits;

  /// The number of arguments used to construct the type.
  unsigned NumArgs;
};

class CXXDependentScopeMemberExprBitfields {
  friend class ASTStmtReader;
  friend class CXXDependentScopeMemberExpr;

  unsigned : NumExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether this member expression has info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// See getFirstQualifierFoundInScope() and the comment listing
  /// the trailing objects.
  unsigned HasFirstQualifierFoundInScope : 1;

  /// The location of the '->' or '.' operator.
  SourceLocation OperatorLoc;
};

class OverloadExprBitfields {
  friend class ASTStmtReader;
  friend class OverloadExpr;

  unsigned : NumExprBits;

  /// Whether the name includes info for explicit template
  /// keyword and arguments.
  unsigned HasTemplateKWAndArgsInfo : 1;

  /// Padding used by the derived classes to store various bits. If you
  /// need to add some data here, shrink this padding and add your data
  /// above. NumOverloadExprBits also needs to be updated.
  unsigned : 32 - NumExprBits - 1;

  /// The number of results.
  unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };

class UnresolvedLookupExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedLookupExpr;

  unsigned : NumOverloadExprBits;

  /// True if these lookup results should be extended by
  /// argument-dependent lookup if this is the operand of a function call.
  unsigned RequiresADL : 1;

  /// True if these lookup results are overloaded. This is pretty trivially
  /// rederivable if we urgently need to kill this field.
  unsigned Overloaded : 1;
};
// NOTE(review): the two message literals below concatenate to "...bytes
// toavoid trashing..." (missing space between "to" and "avoid"); a
// cosmetic diagnostic-string fix, left untouched here.
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
              "UnresolvedLookupExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");

class UnresolvedMemberExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedMemberExpr;

  unsigned : NumOverloadExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether the lookup results contain an unresolved using declaration.
  unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
              "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");

class CXXNoexceptExprBitfields {
  friend class ASTStmtReader;
  friend class CXXNoexceptExpr;

  unsigned : NumExprBits;

  unsigned Value : 1;
};

class SubstNonTypeTemplateParmExprBitfields {
  friend class ASTStmtReader;
  friend class SubstNonTypeTemplateParmExpr;

  unsigned : NumExprBits;

  /// The location of the non-type template parameter reference.
  SourceLocation NameLoc;
};

//===--- C++ Coroutines TS bitfields classes ---===//

class CoawaitExprBitfields {
  friend class CoawaitExpr;

  unsigned : NumExprBits;

  unsigned IsImplicit : 1;
};

//===--- Obj-C Expression bitfields classes ---===//

class ObjCIndirectCopyRestoreExprBitfields {
  friend class ObjCIndirectCopyRestoreExpr;

  unsigned : NumExprBits;

  unsigned ShouldCopy : 1;
};

//===--- Clang Extensions bitfields classes ---===//

class OpaqueValueExprBitfields {
  friend class ASTStmtReader;
  friend class OpaqueValueExpr;

  unsigned : NumExprBits;

  /// The OVE is a unique semantic reference to its source expression if this
  /// bit is set to true.
  unsigned IsUnique : 1;

  SourceLocation Loc;
};

// Anonymous union overlaying all the per-node bitfield layouts above;
// every member starts with the same leading bits, so the union is a
// poor-man's tagged layout keyed by StmtBits.sClass.
union {
  // Same order as in StmtNodes.td.

  // Statements
  StmtBitfields StmtBits;
  NullStmtBitfields NullStmtBits;
  CompoundStmtBitfields CompoundStmtBits;
  LabelStmtBitfields LabelStmtBits;
  AttributedStmtBitfields AttributedStmtBits;
  IfStmtBitfields IfStmtBits;
  SwitchStmtBitfields SwitchStmtBits;
  WhileStmtBitfields WhileStmtBits;
  DoStmtBitfields DoStmtBits;
  ForStmtBitfields ForStmtBits;
  GotoStmtBitfields GotoStmtBits;
  ContinueStmtBitfields ContinueStmtBits;
  BreakStmtBitfields BreakStmtBits;
  ReturnStmtBitfields ReturnStmtBits;
  SwitchCaseBitfields SwitchCaseBits;

  // Expressions
  ExprBitfields ExprBits;
  PredefinedExprBitfields PredefinedExprBits;
  DeclRefExprBitfields DeclRefExprBits;
  FloatingLiteralBitfields FloatingLiteralBits;
  StringLiteralBitfields StringLiteralBits;
  CharacterLiteralBitfields CharacterLiteralBits;
  UnaryOperatorBitfields UnaryOperatorBits;
  UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
  ArraySubscriptExprBitfields ArraySubscriptExprBits;
  CallExprBitfields CallExprBits;
  MemberExprBitfields MemberExprBits;
  CastExprBitfields CastExprBits;
  BinaryOperatorBitfields BinaryOperatorBits;
  InitListExprBitfields InitListExprBits;
  ParenListExprBitfields ParenListExprBits;
  GenericSelectionExprBitfields GenericSelectionExprBits;
  PseudoObjectExprBitfields PseudoObjectExprBits;
  SourceLocExprBitfields SourceLocExprBits;

  // C++ Expressions
  CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
  CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
  CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
  CXXThisExprBitfields CXXThisExprBits;
  CXXThrowExprBitfields CXXThrowExprBits;
  CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
  CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
  CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
  CXXNewExprBitfields CXXNewExprBits;
  CXXDeleteExprBitfields CXXDeleteExprBits;
  TypeTraitExprBitfields TypeTraitExprBits;
  DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
  CXXConstructExprBitfields CXXConstructExprBits;
  ExprWithCleanupsBitfields ExprWithCleanupsBits;
  CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
  CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
  OverloadExprBitfields OverloadExprBits;
  UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
  UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
  CXXNoexceptExprBitfields CXXNoexceptExprBits;
  SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;

  // C++ Coroutines TS expressions
  CoawaitExprBitfields CoawaitBits;

  // Obj-C Expressions
  ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

  // Clang Extensions
  OpaqueValueExprBitfields OpaqueValueExprBits;
};

public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void *operator new(size_t bytes, const ASTContext &C, unsigned alignment = 8);

void *operator new(size_t bytes, const ASTContext *C, unsigned alignment = 8) {
  return operator new(bytes, *C, alignment);
}

// Placement new: returns the caller-supplied storage unchanged.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }

// Matching deletes are no-ops: Stmts live in the ASTContext arena and are
// never individually freed.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}

public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};

protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template <typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
    : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                  std::random_access_iterator_tag, TPtr> {
  using Base = typename CastIterator::iterator_adaptor_base;

  CastIterator() : Base(nullptr) {}
  CastIterator(StmtPtr *I) : Base(I) {}

  // Dereference downcasts the stored Stmt* to T*, tolerating nulls.
  typename Base::value_type operator*() const {
    return cast_or_null<T>(*this->I);
  }
};

/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;

private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;

protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
// Stmts are arena-allocated and referenced by pointer; copying or moving
// them would corrupt the AST, so all copy/move operations are deleted.
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;

Stmt(StmtClass SC) {
  static_assert(sizeof(*this) <= 8,
                "changing bitfields changed sizeof(Stmt)");
  static_assert(sizeof(*this) % alignof(void *) == 0,
                "Insufficient alignment!");
  StmtBits.sClass = SC;
  StmtBits.IsOMPStructuredBlock = false;
  if (StatisticsEnabled)
    Stmt::addStmtClass(SC);
}

StmtClass getStmtClass() const {
  return static_cast<StmtClass>(StmtBits.sClass);
}

const char *getStmtClassName() const;

bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
  StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
}

/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;

// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();

/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;

/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;

/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;

/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, unsigned Indentation = 0,
                 StringRef NewlineSymbol = "\n",
                 const ASTContext *Context = nullptr) const;

/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
               const PrintingPolicy &Policy, bool AddQuotes) const;

/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;

/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
  return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}

const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
  // Delegate to the const overload, then cast the constness back off.
  return const_cast<Stmt *>(
      const_cast<const Stmt *>(this)->stripLabelLikeStatements());
}

/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;

using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;

child_range children();

const_child_range children() const {
  auto Children = const_cast<Stmt *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }

const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }

/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
///        the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
///        representation of this statement (e.g., where non-type template
///        parameters are identified by index/level rather than their
///        declaration pointers) or the exact representation of the statement
///        as written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
             bool Canonical) const;

/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
///        have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash &Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  // The group of declarations this statement wraps (one or more Decls).
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  // The semicolon location is kept in the shared bitfield storage, not as
  // a member, so a NullStmt adds no size beyond the Stmt header.
  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children: both iterators are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // Body statements are stored as trailing objects directly after this
  // object; iterators are raw pointers into that array.
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    body_begin()[size() - 1] = S;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt.
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;

SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
    : Stmt(SC), ColonLoc(ColonLoc) {
  setKeywordLoc(KWLoc);
}

SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }

// Defined out of line below, after CaseStmt/DefaultStmt are complete.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
  return const_cast<SwitchCase *>(this)->getSubStmt();
}

SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;

static bool classof(const Stmt *T) {
  return T->getStmtClass() == CaseStmtClass ||
         T->getStmtClass() == DefaultStmtClass;
}
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing "Stmt *" array; the RHS slot exists only
  // for GNU range cases, which shifts the substatement slot by one.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of
    // recursion (e.g. "case 1: case 2: case 3: ...").
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};

// Out-of-line definitions of the SwitchCase members declared inline above;
// they need the complete CaseStmt and DefaultStmt types.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    // Delegate to the const overload, then cast the constness back off.
    const ValueStmt *ConstThis = this;
    return const_cast<Expr *>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};

/// LabelStmt - Represents a label, which has a substatement.  For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {} SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; } void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getBeginLoc() const { return getIdentLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... 
} class AttributedStmt final : public ValueStmt, private llvm::TrailingObjects<AttributedStmt, const Attr *> { friend class ASTStmtReader; friend TrailingObjects; Stmt *SubStmt; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt) : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) { AttributedStmtBits.NumAttrs = Attrs.size(); AttributedStmtBits.AttrLoc = Loc; std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr()); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : ValueStmt(AttributedStmtClass, Empty) { AttributedStmtBits.NumAttrs = NumAttrs; AttributedStmtBits.AttrLoc = SourceLocation{}; std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr); } const Attr *const *getAttrArrayPtr() const { return getTrailingObjects<const Attr *>(); } const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); // Build an empty attributed statement. static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; } ArrayRef<const Attr *> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getBeginLoc() const { return getAttrLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. 
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //    Always present.
  //
  // * A "Stmt *" for the else statement.
  //    Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //    Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 }; // Condition + then are always stored.

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets are computed dynamically: each optional slot that is present
  // shifts everything after it by one.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  // The condition is stored as a "Stmt *" trailing object but is an Expr.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  // The "else" keyword location is a trailing SourceLocation, present only
  // when an else branch exists.
  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 }; // Condition + body are always stored.

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Dynamic offsets: optional init/var slots shift the later slots.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  // Prepends SC to the intrusive singly-linked case list.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 }; // Condition + body are always stored.

  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setCond(Cond);
    setBody(Body);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // The condition is stored in the SubExprs array as a Stmt* but is an Expr.
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }

  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // Iterators: a direct goto has no child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  // The target is stored as a Stmt* but is always an Expr.
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&Target, &Target + 1); }

  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};

/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }

  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // Iterators: a continue statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }

  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // Iterators: a break statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  // The return value is stored as a Stmt* but is always an Expr (or null).
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators: only the (optional) return expression is a child.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};

/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators.
using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. 
StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; unsigned NumLabels = 0; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, unsigned numlabels, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) {} bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } //===--- Labels ---===// bool isAsmGoto() const { return NumLabels > 0; } unsigned getNumLabels() const { return NumLabels; } IdentifierInfo *getLabelIdentifier(unsigned i) const { return Names[i + NumInputs]; } AddrLabelExpr *getLabelExpr(unsigned i) const; StringRef getLabelName(unsigned i) const; using labels_iterator = CastIterator<AddrLabelExpr>; using const_labels_iterator = ConstCastIterator<AddrLabelExpr>; using labels_range = llvm::iterator_range<labels_iterator>; using labels_const_range = llvm::iterator_range<const_labels_iterator>; labels_iterator begin_labels() { return &Exprs[0] + NumInputs; } labels_iterator end_labels() { return &Exprs[0] + NumInputs + 
NumLabels; } labels_range labels() { return labels_range(begin_labels(), end_labels()); } const_labels_iterator begin_labels() const { return &Exprs[0] + NumInputs; } const_labels_iterator end_labels() const { return &Exprs[0] + NumInputs + NumLabels; } labels_const_range labels() const { return labels_const_range(begin_labels(), end_labels()); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, unsigned NumLabels, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. 
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return 
child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } const_child_range children() const { return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {} public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getEndLoc(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } const_child_range children() const { return const_child_range(&Block, &Block + 1); } static bool classof(const Stmt *T) { return 
T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } const_child_range children() const { return const_child_range(Children, Children + 2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. 
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. 
bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variable captured, including 'this'. unsigned NumCaptures; /// The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. 
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. 
using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getBeginLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getEndLoc(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); const_child_range children() const; }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
GB_unop__identity_fp64_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp64_int16)
// op(A') function:  GB (_unop_tran__identity_fp64_int16)

// C type:   double
// A type:   int16_t
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = (double) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: int16_t -> double requires a typecast, so the memcpy fast path
// below is compiled out)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the IDENTITY operator entrywise: Cx [p] = (double) Ax [p], for all
// anz entries, in parallel.  If Ab is non-NULL (bitmap case), entries with
// Ab [p] == 0 are skipped.  Returns GrB_NO_VALUE when this kernel is
// compiled out via GB_DISABLE, so the caller falls back to the generic case.
GrB_Info GB (_unop_apply__identity_fp64_int16)
(
    double *Cx,                 // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is supplied by the shared template
// GB_unop_transpose.c, specialized by the macros defined above.
GrB_Info GB (_unop_tran__identity_fp64_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
halos.c
#include "../comms.h"
#include "../mesh.h"

// Enforce reflective boundary conditions on the problem state
//
// `arr` is an nx*ny row-major field with a halo of width `pad` cells on all
// sides.  When `pack` is non-zero and MPI is enabled, halos are first
// exchanged with neighbouring ranks (pack -> non-blocking send/recv ->
// wait -> unpack); afterwards, any side with no neighbour (EDGE) is filled
// by mirroring interior cells.  `invert` (INVERT_X / INVERT_Y) selects which
// axis flips the sign of the mirrored values, as needed for velocity fields.
void handle_boundary_2d(const int nx, const int ny, Mesh* mesh, double* arr,
                        const int invert, const int pack) {
  START_PROFILING(&comms_profile);

  const int pad = mesh->pad;           // halo width in cells
  int* neighbours = mesh->neighbours;  // neighbour rank per direction, or EDGE

#ifdef MPI
  double* east_buffer_out = mesh->east_buffer_out;
  double* east_buffer_in = mesh->east_buffer_in;
  double* west_buffer_out = mesh->west_buffer_out;
  double* west_buffer_in = mesh->west_buffer_in;
  double* north_buffer_out = mesh->north_buffer_out;
  double* north_buffer_in = mesh->north_buffer_in;
  double* south_buffer_out = mesh->south_buffer_out;
  double* south_buffer_in = mesh->south_buffer_in;

  int nmessages = 0;

  if (pack) {
    // Pack east and west
    if (neighbours[EAST] != EDGE) {
#pragma omp target teams distribute parallel for collapse(2)
      for (int ii = pad; ii < ny - pad; ++ii) {
        for (int dd = 0; dd < pad; ++dd) {
          // copy the last `pad` interior columns into the outgoing buffer
          east_buffer_out[(ii - pad) * pad + dd] =
              arr[(ii * nx) + (nx - 2 * pad + dd)];
        }
      }

      // NOTE(review): the same pointer is passed as both src and dst —
      // presumably copy_buffer synchronizes this buffer between device and
      // host rather than copying between two arrays; confirm its contract.
      copy_buffer(pad * ny, &east_buffer_out, &east_buffer_out, RECV);

      // Tags pair with the neighbour's opposite-direction calls
      // (east send tag 2 <-> neighbour's west recv tag 2, etc.)
      non_block_send(east_buffer_out, (ny - 2 * pad) * pad, neighbours[EAST], 2,
                     nmessages++);
      non_block_recv(east_buffer_in, (ny - 2 * pad) * pad, neighbours[EAST], 3,
                     nmessages++);
    }
    if (neighbours[WEST] != EDGE) {
#pragma omp target teams distribute parallel for collapse(2)
      for (int ii = pad; ii < ny - pad; ++ii) {
        for (int dd = 0; dd < pad; ++dd) {
          // copy the first `pad` interior columns into the outgoing buffer
          west_buffer_out[(ii - pad) * pad + dd] = arr[(ii * nx) + (pad + dd)];
        }
      }

      copy_buffer(pad * ny, &west_buffer_out, &west_buffer_out, RECV);

      non_block_send(west_buffer_out, (ny - 2 * pad) * pad, neighbours[WEST], 3,
                     nmessages++);
      non_block_recv(west_buffer_in, (ny - 2 * pad) * pad, neighbours[WEST], 2,
                     nmessages++);
    }

    // Pack north and south
    if (neighbours[NORTH] != EDGE) {
#pragma omp target teams distribute parallel for collapse(2)
      for (int dd = 0; dd < pad; ++dd) {
        for (int jj = pad; jj < nx - pad; ++jj) {
          // copy the top `pad` interior rows into the outgoing buffer
          north_buffer_out[dd * (nx - 2 * pad) + (jj - pad)] =
              arr[(ny - 2 * pad + dd) * nx + jj];
        }
      }

      copy_buffer(nx * pad, &north_buffer_out, &north_buffer_out, RECV);

      non_block_send(north_buffer_out, (nx - 2 * pad) * pad, neighbours[NORTH],
                     1, nmessages++);
      non_block_recv(north_buffer_in, (nx - 2 * pad) * pad, neighbours[NORTH],
                     0, nmessages++);
    }
    if (neighbours[SOUTH] != EDGE) {
#pragma omp target teams distribute parallel for collapse(2)
      for (int dd = 0; dd < pad; ++dd) {
        for (int jj = pad; jj < nx - pad; ++jj) {
          // copy the bottom `pad` interior rows into the outgoing buffer
          south_buffer_out[dd * (nx - 2 * pad) + (jj - pad)] =
              arr[(pad + dd) * nx + jj];
        }
      }

      copy_buffer(nx * pad, &south_buffer_out, &south_buffer_out, RECV);

      non_block_send(south_buffer_out, (nx - 2 * pad) * pad, neighbours[SOUTH],
                     0, nmessages++);
      non_block_recv(south_buffer_in, (nx - 2 * pad) * pad, neighbours[SOUTH],
                     1, nmessages++);
    }

    // Block until every posted send and recv above has completed
    wait_on_messages(nmessages);

    // Unpack east and west
    if (neighbours[WEST] != EDGE) {
      copy_buffer(pad * ny, &west_buffer_in, &west_buffer_in, SEND);

#pragma omp target teams distribute parallel for collapse(2)
      for (int ii = pad; ii < ny - pad; ++ii) {
        for (int dd = 0; dd < pad; ++dd) {
          // received columns fill the west halo
          arr[ii * nx + dd] = west_buffer_in[(ii - pad) * pad + dd];
        }
      }
    }
    if (neighbours[EAST] != EDGE) {
      copy_buffer(pad * ny, &east_buffer_in, &east_buffer_in, SEND);

#pragma omp target teams distribute parallel for collapse(2)
      for (int ii = pad; ii < ny - pad; ++ii) {
        for (int dd = 0; dd < pad; ++dd) {
          // received columns fill the east halo
          arr[ii * nx + (nx - pad + dd)] =
              east_buffer_in[(ii - pad) * pad + dd];
        }
      }
    }

    // Unpack north and south
    if (neighbours[NORTH] != EDGE) {
      copy_buffer(nx * pad, &north_buffer_in, &north_buffer_in, SEND);

#pragma omp target teams distribute parallel for collapse(2)
      for (int dd = 0; dd < pad; ++dd) {
        for (int jj = pad; jj < nx - pad; ++jj) {
          // received rows fill the north halo
          arr[(ny - pad + dd) * nx + jj] =
              north_buffer_in[dd * (nx - 2 * pad) + (jj - pad)];
        }
      }
    }
    if (neighbours[SOUTH] != EDGE) {
      copy_buffer(nx * pad, &south_buffer_in, &south_buffer_in, SEND);

#pragma omp target teams distribute parallel for collapse(2)
      for (int dd = 0; dd < pad; ++dd) {
        for (int jj = pad; jj < nx - pad; ++jj) {
          // received rows fill the south halo
          arr[dd * nx + jj] = south_buffer_in[dd * (nx - 2 * pad) + (jj - pad)];
        }
      }
    }
  }
#endif

  // Perform the boundary reflections, potentially with the data updated from
  // neighbours
  double x_inversion_coeff = (invert == INVERT_X) ? -1.0 : 1.0;
  double y_inversion_coeff = (invert == INVERT_Y) ? -1.0 : 1.0;

  // Reflect at the north
  if (neighbours[NORTH] == EDGE) {
#pragma omp target teams distribute parallel for collapse(2)
    for (int dd = 0; dd < pad; ++dd) {
      for (int jj = pad; jj < nx - pad; ++jj) {
        // mirror interior row dd cells below the boundary into the halo
        arr[(ny - pad + dd) * nx + jj] =
            y_inversion_coeff * arr[(ny - 1 - pad - dd) * nx + jj];
      }
    }
  }

  // reflect at the south
  if (neighbours[SOUTH] == EDGE) {
#pragma omp target teams distribute parallel for collapse(2)
    for (int dd = 0; dd < pad; ++dd) {
      for (int jj = pad; jj < nx - pad; ++jj) {
        arr[(pad - 1 - dd) * nx + jj] =
            y_inversion_coeff * arr[(pad + dd) * nx + jj];
      }
    }
  }

  // reflect at the east
  if (neighbours[EAST] == EDGE) {
#pragma omp target teams distribute parallel for collapse(2)
    for (int ii = pad; ii < ny - pad; ++ii) {
      for (int dd = 0; dd < pad; ++dd) {
        arr[ii * nx + (nx - pad + dd)] =
            x_inversion_coeff * arr[ii * nx + (nx - 1 - pad - dd)];
      }
    }
  }

  if (neighbours[WEST] == EDGE) {
    // reflect at the west
#pragma omp target teams distribute parallel for collapse(2)
    for (int ii = pad; ii < ny - pad; ++ii) {
      for (int dd = 0; dd < pad; ++dd) {
        arr[ii * nx + (pad - 1 - dd)] =
            x_inversion_coeff * arr[ii * nx + (pad + dd)];
      }
    }
  }

  STOP_PROFILING(&comms_profile, __func__);
}

// Reflect the node centered velocities on the boundary
//
// For each node flagged as lying on a boundary face, the velocity is
// projected onto the direction parallel to the face (removing the normal
// component); nodes flagged as corners have their velocity zeroed.
// Interior nodes (boundary_index == IS_INTERIOR) are left untouched.
void handle_unstructured_reflect(const int nnodes, const int* boundary_index,
                                 const int* boundary_type,
                                 const double* boundary_normal_x,
                                 const double* boundary_normal_y,
                                 double* velocity_x, double* velocity_y) {
#pragma omp target teams distribute parallel for
  for (int nn = 0; nn < nnodes; ++nn) {
    const int index = boundary_index[(nn)];
    if (index == IS_INTERIOR) {
      continue;
    }

    if (boundary_type[(index)] == IS_BOUNDARY) {
      // Project the velocity onto the face direction
      // (the parallel direction is the normal rotated by 90 degrees)
      const double boundary_parallel_x = boundary_normal_y[(index)];
      const double boundary_parallel_y = -boundary_normal_x[(index)];
      const double vel_dot_parallel = (velocity_x[(nn)] * boundary_parallel_x +
                                       velocity_y[(nn)] * boundary_parallel_y);
      velocity_x[(nn)] = boundary_parallel_x * vel_dot_parallel;
      velocity_y[(nn)] = boundary_parallel_y * vel_dot_parallel;
    } else if (boundary_type[(index)] == IS_CORNER) {
      // corners pin the node completely
      velocity_x[(nn)] = 0.0;
      velocity_y[(nn)] = 0.0;
    }
  }
}

// Reflect the node centered velocities on the boundary (3D variant)
//
// IS_EDGE nodes are projected onto the edge line, IS_BOUNDARY nodes have
// the normal velocity component removed, and IS_CORNER nodes are zeroed.
void handle_unstructured_reflect_3d(
    const int nnodes, const int* boundary_index, const int* boundary_type,
    const double* boundary_normal_x, const double* boundary_normal_y,
    const double* boundary_normal_z, double* velocity_x, double* velocity_y,
    double* velocity_z) {
#pragma omp target teams distribute parallel for
  for (int nn = 0; nn < nnodes; ++nn) {
    const int index = boundary_index[(nn)];
    if (index == IS_INTERIOR) {
      continue;
    }

    if (boundary_type[(index)] == IS_EDGE) {
      // The normal here isn't actually a normal but a projection vector
      const double ab = (velocity_x[(nn)] * boundary_normal_x[(index)] +
                         velocity_y[(nn)] * boundary_normal_y[(index)] +
                         velocity_z[(nn)] * boundary_normal_z[(index)]);

      // Project the vector onto the edge line
      velocity_x[(nn)] = ab * boundary_normal_x[(index)];
      velocity_y[(nn)] = ab * boundary_normal_y[(index)];
      velocity_z[(nn)] = ab * boundary_normal_z[(index)];
    } else if (boundary_type[(index)] == IS_BOUNDARY) {
      // Perform an orthogonal projection, assuming normal vector is normalised
      const double un = (velocity_x[(nn)] * boundary_normal_x[(index)] +
                         velocity_y[(nn)] * boundary_normal_y[(index)] +
                         velocity_z[(nn)] * boundary_normal_z[(index)]);
      velocity_x[(nn)] -= un * boundary_normal_x[(index)];
      velocity_y[(nn)] -= un * boundary_normal_y[(index)];
      velocity_z[(nn)] -= un * boundary_normal_z[(index)];
    } else if (boundary_type[(index)] == IS_CORNER) {
      velocity_x[(nn)] = 0.0;
      velocity_y[(nn)] = 0.0;
      velocity_z[(nn)] = 0.0;
    }
  }
}
/* ========================== cmfrec.h ========================== */
/******************************************************************************* Collective Matrix Factorization ------------------------------- This is a module for multi-way factorization of sparse and dense matrices intended to be used for recommender system with explicit feedback data plus side information about users and/or items. The reference papers are: (a) Cortes, David. "Cold-start recommendations in Collective Matrix Factorization." arXiv preprint arXiv:1809.00366 (2018). (b) Singh, Ajit P., and Geoffrey J. Gordon. "Relational learning via collective matrix factorization." Proceedings of the 14th ACM SIGKDD international conference on Knowledge discovery and data mining. 2008. (c) Hu, Yifan, Yehuda Koren, and Chris Volinsky. "Collaborative filtering for implicit feedback datasets." 2008 Eighth IEEE International Conference on Data Mining. Ieee, 2008. (d) Takacs, Gabor, Istvan Pilaszy, and Domonkos Tikk. "Applications of the conjugate gradient method for implicit feedback collaborative filtering." Proceedings of the fifth ACM conference on Recommender systems. 2011. (e) Rendle, Steffen, Li Zhang, and Yehuda Koren. "On the difficulty of evaluating baselines: A study on recommender systems." arXiv preprint arXiv:1905.01395 (2019). (f) Franc, Vojtech, Vaclav Hlavac, and Mirko Navara. "Sequential coordinate-wise algorithm for the non-negative least squares problem." International Conference on Computer Analysis of Images and Patterns. Springer, Berlin, Heidelberg, 2005. (g) Zhou, Yunhong, et al. "Large-scale parallel collaborative filtering for the netflix prize." International conference on algorithmic applications in management. Springer, Berlin, Heidelberg, 2008. For information about the models offered here and how they are fit to the data, see the files 'collective.c' and 'offsets.c'. Written for C99 standard and OpenMP version 2.0 or higher, and aimed to be used either as a stand-alone program, or wrapped into scripting languages such as Python and R. 
<https://www.github.com/david-cortes/cmfrec> MIT License: Copyright (c) 2020-2021 David Cortes All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*******************************************************************************/ #ifdef __cplusplus extern "C" { #endif #include <stddef.h> #include <limits.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <math.h> #include <float.h> #include <stdint.h> #include <inttypes.h> #ifndef _FOR_R #include <stdio.h> #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() (0) #endif #include <signal.h> typedef void (*sig_t_)(int); #ifdef _FOR_PYTHON /* This contains the standard cblas.h header */ #ifdef USE_FINDBLAS #include "findblas.h" /* https://www.github.com/david-cortes/findblas */ #endif #if defined(HAS_OPENBLAS) || defined(HAS_ATLAS) #ifndef AVOID_BLAS_SYR #define AVOID_BLAS_SYR #endif #endif #include <stdarg.h> #include <stdio.h> #ifdef _WIN32 #define IMPORTED_FUN __declspec(dllimport) #else #define IMPORTED_FUN #endif extern IMPORTED_FUN void PySys_WriteStdout(const char *fmt, ...); extern IMPORTED_FUN void PySys_WriteStderr(const char *fmt, ...); void python_printmsg(char *msg); void python_printerrmsg(char *msg); void py_printf(const char *fmt, ...); void py_errprintf(void *ignored, const char *fmt, ...); extern void cy_printf(char *msg); extern void cy_errprintf(char *msg); #define printf py_printf #define fprintf py_errprintf #define fflush(arg) {} #elif defined(_FOR_R) #include <Rconfig.h> #include <R.h> #include <Rinternals.h> #include <R_ext/Print.h> #include <R_ext/BLAS.h> #include <R_ext/Lapack.h> #include <R_ext/Visibility.h> #define USE_DOUBLE #define printf Rprintf #define fprintf(f, message) REprintf(message) #define fflush(f) R_FlushConsole() #elif defined(MKL_ILP64) #include "mkl.h" #endif /* Here one may also include the standard headers "cblas.h" and "lapack.h", if one wants to use a non-standard version such as ILP64 (-DMKL_ILP64). 
*/ #if !defined(_FOR_R) && !defined(_FOR_PYTHON) #include <stdio.h> #endif #ifndef FCONE #define FCONE #endif /* Aliasing for compiler optimizations */ #ifdef __cplusplus #if defined(__GNUG__) || defined(__GNUC__) || defined(_MSC_VER) || defined(__clang__) || defined(__INTEL_COMPILER) || defined(__IBMCPP__) || defined(__ibmxl__) #define restrict __restrict #else #define restrict #endif #elif defined(_MSC_VER) #define restrict __restrict #elif !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) #define restrict #endif /* OpenMP < 3.0 (e.g. MSVC as of 2020) does not support parallel for's with unsigned iterators, and does not support declaring the iterator type in the loop itself */ #ifdef _OPENMP #if (_OPENMP < 200801) || defined(_WIN32) || defined(_WIN64) /* OpenMP < 3.0 */ #define size_t_for #else #define size_t_for size_t #endif #else #define size_t_for size_t #endif #ifndef isnan #ifdef _isnan #define isnan _isnan #else #define isnan(x) ( (x) != (x) ) #endif #endif #ifdef _FOR_R #define NAN_ NA_REAL #else #define NAN_ NAN #endif #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #if defined(_FOR_R) || defined(_FOR_PYTHON) || !defined(_WIN32) #define CMFREC_EXPORTABLE #else #ifdef CMFREC_COMPILE_TIME #define CMFREC_EXPORTABLE __declspec(dllexport) #else #define CMFREC_EXPORTABLE __declspec(dllimport) #endif #endif #if !defined(USE_FLOAT) #define LBFGS_FLOAT 64 #define real_t double #define exp_t exp #define log_t log #define fabs_t fabs #define fmax_t fmax #define sqrt_t sqrt #define fma_t fma #define EPSILON_T DBL_EPSILON #define HUGE_VAL_T HUGE_VAL #define cblas_tdot cblas_ddot #define cblas_tcopy cblas_dcopy #define cblas_taxpy cblas_daxpy #define cblas_tscal cblas_dscal #define cblas_tsyr cblas_dsyr #define cblas_tsyrk cblas_dsyrk #define cblas_tnrm2 cblas_dnrm2 #define cblas_tgemm cblas_dgemm #define cblas_tgemv cblas_dgemv #define cblas_tger cblas_dger #define cblas_tsymv cblas_dsymv #ifndef _FOR_R #define tlacpy_ dlacpy_ #define tposv_ dposv_ 
#define tpotrf_ dpotrf_ #define tpotrs_ dpotrs_ #define tgelsd_ dgelsd_ #else #define tlacpy_(a1, a2, a3, a4, a5, a6, a7) F77_CALL(dlacpy)((a1), (a2), (a3), (a4), (a5), (a6), (a7) FCONE) #define tposv_(a1, a2, a3, a4, a5, a6, a7, a8) F77_CALL(dposv)((a1), (a2), (a3), (a4), (a5), (a6), (a7), (a8) FCONE) #define tpotrf_(a1, a2, a3, a4, a5) F77_CALL(dpotrf)((a1), (a2), (a3), (a4), (a5) FCONE) #define tpotrs_(a1, a2, a3, a4, a5, a6, a7, a8) F77_CALL(dpotrs)((a1), (a2), (a3), (a4), (a5), (a6), (a7), (a8) FCONE) #define tgelsd_ F77_CALL(dgelsd) #endif #else #define LBFGS_FLOAT 32 #define real_t float #define exp_t expf #define log_t logf #define fmax_t fmaxf #define fabs_t fabsf #define sqrt_t sqrtf #define fma_t fmaf #define EPSILON_T FLT_EPSILON #define HUGE_VAL_T HUGE_VALF #define cblas_tdot cblas_sdot #define cblas_tcopy cblas_scopy #define cblas_taxpy cblas_saxpy #define cblas_tscal cblas_sscal #define cblas_tsyr cblas_ssyr #define cblas_tsyrk cblas_ssyrk #define cblas_tnrm2 cblas_snrm2 #define cblas_tgemm cblas_sgemm #define cblas_tgemv cblas_sgemv #define cblas_tger cblas_sger #define cblas_tsymv cblas_ssymv #define tlacpy_ slacpy_ #define tposv_ sposv_ #define tpotrf_ spotrf_ #define tpotrs_ spotrs_ #define tgelsd_ sgelsd_ #endif #ifndef isfinite #define isfinite(x) ((x) > (-HUGE_VAL_T) && (x) < HUGE_VAL_T) #endif #if !defined(USE_INT64) && !defined(MKL_ILP64) #define int_t int #else #define ILP64 #define int_t int64_t #endif #if (SIZE_MAX >= UINT64_MAX) #define rng_state_t uint64_t #define USE_XOSHIRO256 #else #define rng_state_t uint32_t #define USE_XOSHIRO128 #endif #if !defined(LAPACK_H) && !defined(_FOR_R) void tposv_(const char*, const int_t*, const int_t*, real_t*, const int_t*, real_t*, const int_t*, int_t*); void tlacpy_(const char*, const int_t*, const int_t*, const real_t*, const int_t*, real_t*, const int_t*); void tpotrf_(const char*, const int_t*, real_t*, const int_t*, int_t*); void tpotrs_(const char*, const int_t*, const int_t*, const real_t*, 
const int_t*, real_t*, const int_t*, int_t*); void tgelsd_(const int_t*, const int_t*, const int_t*, real_t*, const int_t*, real_t*, const int_t*, real_t*, const real_t*, int_t*, real_t*, const int_t*, int_t*, int_t*); #endif #ifndef CBLAS_H typedef enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102} CBLAS_ORDER; typedef enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113, CblasConjNoTrans=114} CBLAS_TRANSPOSE; typedef enum CBLAS_UPLO {CblasUpper=121, CblasLower=122} CBLAS_UPLO; typedef CBLAS_ORDER CBLAS_LAYOUT; #if !(defined(_FOR_PYTHON) && !defined(USE_FINDBLAS)) real_t cblas_tdot(const int_t n, const real_t *x, const int_t incx, const real_t *y, const int_t incy); void cblas_tcopy(const int_t n, const real_t *x, const int_t incx, real_t *y, const int_t incy); void cblas_taxpy(const int_t n, const real_t alpha, const real_t *x, const int_t incx, real_t *y, const int_t incy); void cblas_tscal(const int_t N, const real_t alpha, real_t *X, const int_t incX); void cblas_tsyr(const CBLAS_ORDER order, const CBLAS_UPLO Uplo, const int_t N, const real_t alpha, const real_t *X, const int_t incX, real_t *A, const int_t lda); void cblas_tsyrk(const CBLAS_ORDER Order, const CBLAS_UPLO Uplo, const CBLAS_TRANSPOSE Trans, const int_t N, const int_t K, const real_t alpha, const real_t *A, const int_t lda, const real_t beta, real_t *C, const int_t ldc); real_t cblas_tnrm2 (const int_t N, const real_t *X, const int_t incX); void cblas_tgemm(const CBLAS_ORDER Order, const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int_t M, const int_t N, const int_t K, const real_t alpha, const real_t *A, const int_t lda, const real_t *B, const int_t ldb, const real_t beta, real_t *C, const int_t ldc); void cblas_tgemv(const CBLAS_ORDER order, const CBLAS_TRANSPOSE trans, const int_t m, const int_t n, const real_t alpha, const real_t *a, const int_t lda, const real_t *x, const int_t incx, const real_t beta, real_t *y, const int_t incy); void 
cblas_tsymv(const CBLAS_ORDER order, const CBLAS_UPLO Uplo, const int_t N, const real_t alpha, const real_t *A, const int_t lda, const real_t *X, const int_t incX, const real_t beta, real_t *Y, const int_t incY); void cblas_tger(const CBLAS_ORDER order, const int_t m, const int_t n, const real_t alpha, const real_t *x, const int_t incx, const real_t *y, const int_t incy, real_t *a, const int_t lda); #else real_t cblas_tdot(const int_t n, const real_t *x, const int_t incx, const real_t *y, const int_t incy); void cblas_tcopy(const int_t n, const real_t *x, const int_t incx, real_t *y, const int_t incy); void cblas_taxpy(const int_t n, const real_t alpha, const real_t *x, const int_t incx, real_t *y, const int_t incy); void cblas_tscal(const int_t N, const real_t alpha, real_t *X, const int_t incX); void cblas_tsyr(const int order, const int Uplo, const int_t N, const real_t alpha, const real_t *X, const int_t incX, real_t *A, const int_t lda); void cblas_tsyrk(const int Order, const int Uplo, const int Trans, const int_t N, const int_t K, const real_t alpha, const real_t *A, const int_t lda, const real_t beta, real_t *C, const int_t ldc); real_t cblas_tnrm2 (const int_t N, const real_t *X, const int_t incX); void cblas_tgemm(const int Order, const int TransA, const int TransB, const int_t M, const int_t N, const int_t K, const real_t alpha, const real_t *A, const int_t lda, const real_t *B, const int_t ldb, const real_t beta, real_t *C, const int_t ldc); void cblas_tgemv(const int order, const int trans, const int_t m, const int_t n, const real_t alpha, const real_t *a, const int_t lda, const real_t *x, const int_t incx, const real_t beta, real_t *y, const int_t incy); void cblas_tsymv(const int order, const int Uplo, const int_t N, const real_t alpha, const real_t *A, const int_t lda, const real_t *X, const int_t incX, const real_t beta, real_t *Y, const int_t incY); void cblas_tger(const int order, const int_t m, const int_t n, const real_t alpha, const real_t *x, 
const int_t incx, const real_t *y, const int_t incy, real_t *a, const int_t lda); #endif void openblas_set_num_threads(int); int openblas_get_num_threads(void); #endif #if defined(_FOR_R) && defined(WRAPPED_GELSD) && !defined(USE_FLOAT) typedef struct Args_to_GELSD { int *m; int *n; int *nrhs; real_t *A; int *lda; real_t *B; int *ldb; real_t *S; real_t *rcond; int *rank; real_t *work; int *lwork; int *iwork; int *info; } Args_to_GELSD; typedef struct PointersToFree { void **pointers; size_t n_pointers; } PointersToFree; extern bool GELSD_free_inputs; #endif #include "lbfgs.h" #define square(x) ( (x) * (x) ) #define max2(a, b) ((a) >= ((b))? (a) : (b)) #define min2(a, b) ((a) <= ((b))? (a) : (b)) #define cap_to_4(x) (((x) > 4)? 4 : (min2(x, 1))) #define set_to_zero(arr, n) memset((arr), 0, (size_t)(n)*sizeof(real_t)) #define copy_arr(from, to, n) memcpy((to), (from), (size_t)(n)*sizeof(real_t)) /* helpers.c */ typedef struct ArraysToFill { real_t *A; size_t sizeA; real_t *B; size_t sizeB; } ArraysToFill; void set_to_zero_(real_t *arr, const size_t n, int nthreads); void copy_arr_(real_t *restrict src, real_t *restrict dest, size_t n, int nthreads); int_t count_NAs(real_t arr[], size_t n, int nthreads); void count_NAs_by_row ( real_t *restrict arr, int_t m, int_t n, int_t *restrict cnt_NA, int nthreads, bool *restrict full_dense, bool *restrict near_dense, bool *restrict some_full ); void count_NAs_by_col ( real_t *restrict arr, int_t m, int_t n, int_t *restrict cnt_NA, bool *restrict full_dense, bool *restrict near_dense, bool *restrict some_full ); void sum_by_rows(real_t *restrict A, real_t *restrict outp, int_t m, int_t n, int nthreads); void sum_by_cols(real_t *restrict A, real_t *restrict outp, int_t m, int_t n, size_t lda, int nthreads); void mat_plus_rowvec(real_t *restrict A, real_t *restrict b, int_t m, int_t n, int nthreads); void mat_plus_colvec(real_t *restrict A, real_t *restrict b, real_t alpha, int_t m, int_t n, size_t lda, int nthreads); void 
mat_minus_rowvec2 ( real_t *restrict Xfull, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict b, int_t m, int_t n, int nthreads ); void mat_minus_colvec2 ( real_t *restrict Xfull, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict b, int_t m, int_t n, int nthreads ); void nan_to_zero(real_t *restrict arr, real_t *restrict comp, size_t n, int nthreads); void mult_if_non_nan(real_t *restrict arr, real_t *restrict comp, real_t *restrict w, size_t n, int nthreads); void mult_elemwise(real_t *restrict inout, real_t *restrict other, size_t n, int nthreads); real_t sum_squares(real_t *restrict arr, size_t n, int nthreads); void taxpy_large(real_t *restrict A, real_t x, real_t *restrict Y, size_t n, int nthreads); void tscal_large(real_t *restrict arr, real_t alpha, size_t n, int nthreads); void rnorm_xoshiro(real_t *seq, const size_t n, rng_state_t state[4]); void seed_state(int_t seed, rng_state_t state[4]); void fill_rnorm_buckets ( const size_t n_buckets, real_t *arr, const size_t n, real_t **ptr_bucket, size_t *sz_bucket, const size_t BUCKET_SIZE ); void rnorm_singlethread(ArraysToFill arrays, rng_state_t state[4]); int_t rnorm_parallel(ArraysToFill arrays, int_t seed, int nthreads); void reduce_mat_sum(real_t *restrict outp, size_t lda, real_t *restrict inp, int_t m, int_t n, int nthreads); void exp_neg_x(real_t *restrict arr, size_t n, int nthreads); void add_to_diag(real_t *restrict A, real_t val, size_t n); real_t sum_sq_div_w(real_t *restrict arr, real_t *restrict w, size_t n, bool compensated, int nthreads); void tgemm_sp_dense ( int_t m, int_t n, real_t alpha, size_t indptr[], int_t indices[], real_t values[], real_t DenseMat[], size_t ldb, real_t OutputMat[], size_t ldc, int nthreads ); void tgemv_dense_sp ( int_t m, int_t n, real_t alpha, real_t DenseMat[], size_t lda, int_t ixB[], real_t vec_sp[], size_t nnz, real_t OutputVec[] ); void tgemv_dense_sp_weighted ( int_t m, int_t n, real_t alpha[], real_t 
DenseMat[], size_t lda, int_t ixB[], real_t vec_sp[], size_t nnz, real_t OutputVec[] ); void tgemv_dense_sp_weighted2 ( int_t m, int_t n, real_t alpha[], real_t alpha2, real_t DenseMat[], size_t lda, int_t ixB[], real_t vec_sp[], size_t nnz, real_t OutputVec[] ); void tgemv_dense_sp_notrans ( int_t m, int_t n, real_t DenseMat[], int_t lda, int_t ixB[], real_t vec_sp[], size_t nnz, real_t OutputVec[] ); void copy_mat ( int_t m, int_t n, real_t *restrict A, int_t lda, real_t *restrict B, int_t ldb ); void sum_mat ( size_t m, size_t n, real_t *restrict A, size_t lda, real_t *restrict B, size_t ldb ); void transpose_mat(real_t *restrict A, size_t m, size_t n, real_t *restrict buffer_real_t); void transpose_mat2(real_t *restrict A, size_t m, size_t n, real_t *restrict outp); void transpose_mat3 ( real_t *restrict A, size_t lda, size_t m, size_t n, real_t *restrict outp, size_t ldb ); int_t coo_to_csr_plus_alloc ( int_t *restrict Xrow, int_t *restrict Xcol, real_t *restrict Xval, real_t *restrict W, int_t m, int_t n, size_t nnz, size_t *restrict *csr_p, int_t *restrict *csr_i, real_t *restrict *csr_v, real_t *restrict *csr_w ); void coo_to_csr ( int_t *restrict Xrow, int_t *restrict Xcol, real_t *restrict Xval, real_t *restrict W, int_t m, int_t n, size_t nnz, size_t *restrict csr_p, int_t *restrict csr_i, real_t *restrict csr_v, real_t *restrict csr_w ); void coo_to_csr_and_csc ( int_t *restrict Xrow, int_t *restrict Xcol, real_t *restrict Xval, real_t *restrict W, int_t m, int_t n, size_t nnz, size_t *restrict csr_p, int_t *restrict csr_i, real_t *restrict csr_v, size_t *restrict csc_p, int_t *restrict csc_i, real_t *restrict csc_v, real_t *restrict csr_w, real_t *restrict csc_w, int nthreads ); void row_means_csr(size_t indptr[], real_t *restrict values, real_t *restrict output, int_t m, int nthreads); extern bool should_stop_procedure; extern bool handle_is_locked; void set_interrup_global_variable(int_t s); int_t lbfgs_printer_collective ( void *instance, const 
real_t *x, const real_t *g, const real_t fx, const real_t xnorm, const real_t gnorm, const real_t step, size_t n, int_t k, int_t ls ); int_t lbfgs_printer_offsets ( void *instance, const real_t *x, const real_t *g, const real_t fx, const real_t xnorm, const real_t gnorm, const real_t step, size_t n, int_t k, int_t ls ); bool check_is_sorted(int_t arr[], int_t n); void qs_argpartition(int_t arr[], real_t values[], int_t n, int_t k); void append_ones_last_col ( real_t *restrict orig, size_t m, size_t n, real_t *restrict outp ); void fill_lower_triangle(real_t A[], size_t n, size_t lda); void print_err_msg(const char *msg); void print_oom_message(void); void act_on_interrupt(int retval, bool handle_interrupt, bool print_msg); #ifdef _FOR_R void R_nan_to_C_nan(real_t arr[], size_t n); #endif long double compensated_sum(real_t *arr, size_t n); long double compensated_sum_product(real_t *restrict arr1, real_t *restrict arr2, size_t n); void custom_syr(const int_t n, const real_t alpha, const real_t *restrict x, real_t *restrict A, const int_t lda); void set_blas_threads(int nthreads_set, int *nthreads_curr); #ifdef _FOR_R extern bool has_RhpcBLASctl; extern SEXP *ptr_glob_lst; extern int* ptr_nthreads; #elif defined(_FOR_PYTHON) extern void py_set_threads(int); extern int py_get_threads(void); #endif #if defined(_FOR_R) && defined(WRAPPED_GELSD) && !defined(USE_FLOAT) SEXP wrapper_GELSD(void *data); void clean_after_GELSD(void *cdata, Rboolean jump); #endif bool get_has_openmp(void); /* common.c */ real_t fun_grad_cannonical_form ( real_t *restrict A, int_t lda, real_t *restrict B, int_t ldb, real_t *restrict g_A, real_t *restrict g_B, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, bool full_dense, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc, bool user_bias, bool item_bias, real_t *restrict biasA, real_t *restrict biasB, real_t *restrict 
g_biasA, real_t *restrict g_biasB, real_t *restrict weight, real_t *restrict weightR, real_t *restrict weightC, real_t scaling, real_t *restrict buffer_real_t, real_t *restrict buffer_mt, int nthreads ); void factors_closed_form ( real_t *restrict a_vec, int_t k, real_t *restrict B, int_t n, int_t ldb, real_t *restrict Xa_dense, bool full_dense, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict weight, real_t *restrict buffer_real_t, real_t lam, real_t lam_last, real_t l1_lam, real_t l1_lam_last, bool scale_lam, bool scale_bias_const, real_t wsum, real_t *restrict precomputedTransBtBinvBt, real_t *restrict precomputedBtB, int_t cnt_NA, int_t ld_BtB, bool BtB_has_diag, bool BtB_is_scaled, real_t scale_BtB, int_t n_BtB, real_t *restrict precomputedBtBchol, bool NA_as_zero, bool use_cg, int_t max_cg_steps,/* <- 'cg' should not be used for new data*/ bool nonneg, int_t max_cd_steps, real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob, real_t multiplier_bias_BtX, bool force_add_diag ); void factors_explicit_cg ( real_t *restrict a_vec, int_t k, real_t *restrict B, int_t n, int_t ldb, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict weight, real_t *restrict buffer_real_t, real_t lam, real_t lam_last, int_t max_cg_steps ); void factors_explicit_cg_NA_as_zero_weighted ( real_t *restrict a_vec, int_t k, real_t *restrict B, int_t n, int_t ldb, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict weight, real_t *restrict precomputedBtB, int_t ld_BtB, real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob, real_t multiplier_bias_BtX, real_t *restrict buffer_real_t, real_t lam, real_t lam_last, int_t max_cg_steps ); void factors_explicit_cg_dense ( real_t *restrict a_vec, int_t k, real_t *restrict B, int_t n, int_t ldb, real_t *restrict Xa_dense, int_t cnt_NA, real_t *restrict weight, real_t *restrict precomputedBtB, int_t ld_BtB, real_t *restrict buffer_real_t, real_t lam, real_t lam_last, int_t 
max_cg_steps );
/* NOTE(review): this region is a run of header declarations whose original line
   breaks were collapsed; only comments and line layout are changed here — all
   code tokens are untouched.  The ");" above closes a declaration that begins
   before this chunk. */
/* Solve for a single factor vector under the implicit-feedback objective.
   The _cg / _chol suffixes presumably select conjugate-gradient vs. Cholesky
   solvers — TODO confirm against the implementations. */
void factors_implicit_cg ( real_t *restrict a_vec, int_t k, real_t *restrict B, size_t ldb, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t lam, real_t *restrict precomputedBtB, int_t ld_BtB, int_t max_cg_steps, real_t *restrict buffer_real_t );
void factors_implicit_chol ( real_t *restrict a_vec, int_t k, real_t *restrict B, size_t ldb, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t lam, real_t l1_lam, real_t *restrict precomputedBtB, int_t ld_BtB, bool nonneg, int_t max_cd_steps, real_t *restrict buffer_real_t );
/* Constrained least-squares solvers (non-negativity / elastic-net); the result
   is written into 'BtX' in-place.  The "_batch" variants take a row count 'm'
   and a thread count. */
void solve_nonneg ( real_t *restrict BtB, real_t *restrict BtX, /* <- solution will be here */ real_t *restrict buffer_real_t, int_t k, real_t l1_lam, real_t l1_lam_last, size_t max_cd_steps, bool fill_lower );
void solve_nonneg_batch ( real_t *restrict BtB, real_t *restrict BtX, /* <- solution will be here */ real_t *restrict buffer_real_t, int_t m, int_t k, size_t lda, real_t l1_lam, real_t l1_lam_last, size_t max_cd_steps, int nthreads );
void solve_elasticnet ( real_t *restrict BtB, real_t *restrict BtX, /* <- solution will be here */ real_t *restrict buffer_real_t, int_t k, real_t l1_lam, real_t l1_lam_last, size_t max_cd_steps, bool fill_lower );
void solve_elasticnet_batch ( real_t *restrict BtB, real_t *restrict BtX, /* <- solution will be here */ real_t *restrict buffer_real_t, int_t m, int_t k, size_t lda, real_t l1_lam, real_t l1_lam_last, size_t max_cd_steps, int nthreads );
/* Objective value (and gradient into 'g_A') for a dense factor matrix, plus a
   helper that folds the L2 penalty into both. */
real_t fun_grad_Adense ( real_t *restrict g_A, real_t *restrict A, int_t lda, real_t *restrict B, int_t ldb, int_t m, int_t n, int_t k, real_t *restrict Xfull, real_t *restrict weight, real_t lam, real_t w, real_t lam_last, bool do_B, bool reset_grad, int nthreads, real_t *restrict buffer_real_t );
void add_lam_to_grad_and_fun ( real_t *restrict fun, real_t *restrict grad, real_t *restrict A, int_t m, int_t k, int_t lda, real_t lam, int nthreads );
/* Argument bundles threaded through the generic (instance, x, g, n, step)
   optimizer-callback interface of the wrapper_fun_grad_* functions below. */
typedef struct data_fun_grad_Adense { int_t lda; real_t *B; int_t ldb; int_t m; int_t n; int_t k; real_t *Xfull; real_t *weight; real_t lam; real_t w; real_t lam_last; int nthreads; real_t *buffer_real_t; } data_fun_grad_Adense;
typedef struct data_fun_grad_Bdense { real_t *A; int_t lda; int_t ldb; int_t m; int_t n; int_t k; real_t *Xfull; real_t *weight; real_t lam; real_t w; real_t lam_last; int nthreads; real_t *buffer_real_t; } data_fun_grad_Bdense;
real_t wrapper_fun_grad_Adense ( void *instance, real_t *x, real_t *g, const size_t n, const real_t step );
real_t wrapper_fun_grad_Bdense ( void *instance, real_t *x, real_t *g, const size_t n, const real_t step );
/* Workspace-size queries for the optimizeA / optimizeA_implicit routines. */
size_t buffer_size_optimizeA ( size_t n, bool full_dense, bool near_dense, bool some_full, bool do_B, bool has_dense, bool has_weights, bool NA_as_zero, bool nonneg, bool has_l1, size_t k, size_t nthreads, bool has_bias_static, bool pass_allocated_BtB, bool keep_precomputedBtB, bool use_cg, bool finalize_chol );
size_t buffer_size_optimizeA_implicit ( size_t k, size_t nthreads, bool pass_allocated_BtB, bool nonneg, bool has_l1, bool use_cg, bool finalize_chol );
/* ALS half-steps: optimize factor matrix A given fixed B (explicit and
   implicit-feedback variants).  X may come in dense ('Xfull') or CSR form. */
void optimizeA ( real_t *restrict A, int_t lda, real_t *restrict B, int_t ldb, int_t m, int_t n, int_t k, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, real_t *restrict Xfull, int_t ldX, bool full_dense, bool near_dense, bool some_full, int_t cnt_NA[], real_t *restrict weight, bool NA_as_zero, real_t lam, real_t lam_last, real_t l1_lam, real_t l1_lam_last, bool scale_lam, bool scale_bias_const, real_t *restrict wsumA, bool do_B, int nthreads, bool use_cg, int_t max_cg_steps, bool nonneg, int_t max_cd_steps, real_t *restrict bias_restore, real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob, real_t *restrict bias_static, real_t multiplier_bias_BtX, bool keep_precomputedBtB, real_t *restrict precomputedBtB, bool *filled_BtB, real_t *restrict buffer_real_t );
void optimizeA_implicit ( real_t *restrict A, size_t lda, real_t *restrict B, size_t ldb, int_t m, int_t n, int_t k, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, real_t lam, real_t l1_lam, int nthreads, bool use_cg, int_t max_cg_steps, bool nonneg, int_t max_cd_steps, real_t *restrict precomputedBtB, /* <- will be calculated if not passed */ real_t *restrict buffer_real_t );
/* Data preprocessing: global-mean centering, bias initialization, and
   per-column centering.  'modified_*' output flags report in-place edits to
   the caller's data. */
int_t calc_mean_and_center ( int_t ixA[], int_t ixB[], real_t *restrict *X_, size_t nnz, real_t *restrict *Xfull_, real_t *restrict Xtrans, int_t m, int_t n, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc, real_t *restrict weight, bool NA_as_zero, bool nonneg, bool center, int nthreads, real_t *restrict glob_mean, bool *modified_X, bool *modified_Xfull, bool allow_overwrite_X );
int_t initialize_biases ( real_t *restrict glob_mean, real_t *restrict biasA, real_t *restrict biasB, bool user_bias, bool item_bias, bool center, real_t lam_user, real_t lam_item, bool scale_lam, bool scale_bias_const, bool force_calc_user_scale, bool force_calc_item_scale, real_t *restrict scaling_biasA, real_t *restrict scaling_biasB, int_t m, int_t n, int_t m_bias, int_t n_bias, int_t ixA[], int_t ixB[], real_t *restrict *X_, size_t nnz, real_t *restrict *Xfull_, real_t *restrict Xtrans, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc, real_t *restrict weight, real_t *restrict Wtrans, real_t *restrict weightR, real_t *restrict weightC, bool nonneg, int nthreads, bool *modified_X, bool *modified_Xfull, bool allow_overwrite_X );
int_t initialize_biases_onesided ( real_t *restrict Xfull, int_t m, int_t n, bool do_B, int_t *restrict cnt_NA, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, real_t *restrict weight, real_t *restrict weightR, real_t glob_mean, bool NA_as_zero, bool nonneg, real_t lam, bool scale_lam, real_t *restrict wsumA, real_t *restrict biasA, int nthreads );
int_t initialize_biases_twosided ( real_t *restrict Xfull, real_t *restrict Xtrans, int_t *restrict cnt_NA_byrow, int_t *restrict cnt_NA_bycol, int_t m, int_t n, bool NA_as_zero, bool nonneg, double glob_mean, size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *Xcsr, size_t *restrict Xcsc_p, int_t *restrict Xcsc_i, real_t *Xcsc, real_t *restrict weight, real_t *restrict Wtrans, real_t *restrict weightR, real_t *restrict weightC, real_t lam_user, real_t lam_item, bool scale_lam, real_t *restrict wsumA, real_t *restrict wsumB, real_t *restrict biasA, real_t *restrict biasB, int nthreads );
int_t center_by_cols ( real_t *restrict col_means, real_t *restrict *Xfull_, int_t m, int_t n, int_t ixA[], int_t ixB[], real_t *restrict *X_, size_t nnz, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc, int nthreads, bool *modified_X, bool *modified_Xfull );
/* Validation and prediction helpers. */
bool check_sparse_indices ( int_t n, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict Xa, int_t ixB[], size_t nnz );
void predict_multiple ( real_t *restrict A, int_t k_user, real_t *restrict B, int_t k_item, real_t *restrict biasA, real_t *restrict biasB, real_t glob_mean, int_t k, int_t k_main, int_t m, int_t n, int_t predA[], int_t predB[], size_t nnz, real_t *restrict outp, int nthreads );
/* qsort-style comparators and a thread-private global used by them. */
int_t cmp_int(const void *a, const void *b);
extern real_t *ptr_real_t_glob;
#if !defined(_WIN32) && !defined(_WIN64)
#pragma omp threadprivate(ptr_real_t_glob)
/* Note: will not be used inside OMP; this is a precaution, just in case */
#endif
int_t cmp_argsort(const void *a, const void *b);
int_t topN ( real_t *restrict a_vec, int_t k_user, real_t *restrict B, int_t k_item, real_t *restrict biasB, real_t glob_mean, real_t biasA, int_t k, int_t k_main, int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int_t n, int nthreads );
/* Most-popular (biases-only) model; this declaration continues on the next
   line of the file. */
CMFREC_EXPORTABLE int_t fit_most_popular ( real_t *restrict biasA, real_t *restrict biasB, real_t *restrict glob_mean, real_t lam_user, real_t lam_item, bool
scale_lam, bool scale_bias_const, real_t alpha, int_t m, int_t n, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, real_t *restrict weight, bool implicit, bool adjust_weight, bool apply_log_transf, bool nonneg, bool NA_as_zero, real_t *restrict w_main_multiplier, int nthreads );
/* NOTE(review): the parameter list above completes the fit_most_popular
   declaration that begins on the previous line of the file; code tokens in
   this region are untouched — only comments and line layout were changed. */
int_t fit_most_popular_internal ( real_t *restrict biasA, real_t *restrict biasB, real_t *restrict glob_mean, bool center, real_t lam_user, real_t lam_item, bool scale_lam, bool scale_bias_const, real_t alpha, int_t m, int_t n, int_t ixA[], int_t ixB[], real_t *restrict *X_, size_t nnz, real_t *restrict *Xfull_, real_t *restrict weight, bool implicit, bool adjust_weight, bool apply_log_transf, bool nonneg, real_t *restrict w_main_multiplier, int nthreads, bool *free_X, bool *free_Xfull, bool allow_overwrite_X );
CMFREC_EXPORTABLE int_t topN_old_most_popular ( bool user_bias, real_t a_bias, real_t *restrict biasA, int_t row_index, real_t *restrict biasB, real_t glob_mean, int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int_t n );
CMFREC_EXPORTABLE int_t predict_X_old_most_popular ( int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict, real_t *restrict biasA, real_t *restrict biasB, real_t glob_mean, int_t m, int_t n );
/* collective.c */
/* Declarations below belong to the collective model — factorization of X
   jointly with user/item side-information matrices U and I. */
void nvars_collective_fun_grad ( size_t m, size_t n, size_t m_u, size_t n_i, size_t m_ubin, size_t n_ibin, size_t p, size_t q, size_t pbin, size_t qbin, size_t nnz, size_t nnz_U, size_t nnz_I, size_t k, size_t k_main, size_t k_user, size_t k_item, bool user_bias, bool item_bias, size_t nthreads, real_t *X, real_t *Xfull, real_t *U, real_t *Ub, real_t *II, real_t *Ib, real_t *U_sp, real_t *U_csr, real_t *I_sp, real_t *I_csr, size_t *nvars, size_t *nbuffer, size_t *nbuffer_mt );
real_t collective_fun_grad ( real_t *restrict values, real_t *restrict grad, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc, real_t *restrict weight, real_t *restrict weightR, real_t *restrict weightC, bool user_bias, bool item_bias, real_t lam, real_t *restrict lam_unique, real_t *restrict U, int_t m_u, int_t p, bool U_has_NA, real_t *restrict II, int_t n_i, int_t q, bool I_has_NA, real_t *restrict Ub, int_t m_ubin, int_t pbin, bool Ub_has_NA, real_t *restrict Ib, int_t n_ibin, int_t qbin, bool Ib_has_NA, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, size_t U_csc_p[], int_t U_csc_i[], real_t *restrict U_csc, size_t I_csr_p[], int_t I_csr_i[], real_t *restrict I_csr, size_t I_csc_p[], int_t I_csc_i[], real_t *restrict I_csc, real_t *restrict buffer_real_t, real_t *restrict buffer_mt, int_t k_main, int_t k_user, int_t k_item, real_t w_main, real_t w_user, real_t w_item, int nthreads );
real_t collective_fun_grad_bin ( real_t *restrict A, int_t lda, real_t *restrict Cb, int_t ldc, real_t *restrict g_A, real_t *restrict g_Cb, real_t *restrict Ub, int_t m, int_t pbin, int_t k, bool Ub_has_NA, double w_user, real_t *restrict buffer_real_t, int nthreads );
real_t collective_fun_grad_single ( real_t *restrict a_vec, real_t *restrict g_A, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t *restrict u_vec, int_t p, int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec, real_t *restrict u_bin_vec, int_t pbin, bool u_vec_has_NA, bool u_bin_vec_has_NA, real_t *restrict B, int_t n, real_t *restrict C, real_t *restrict Cb, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict Xa_dense, real_t *restrict weight, real_t *restrict buffer_real_t, real_t lam, real_t w_main, real_t w_user, real_t lam_last );
/* Argument bundle + callback for computing a single vector's factors through
   the (instance, x, g, n, step) optimizer interface. */
typedef struct data_factors_fun_grad { int_t k; int_t k_user; int_t k_item; int_t k_main; real_t *u_vec; int_t p; int_t *u_vec_ixB; real_t *u_vec_sp; size_t nnz_u_vec; real_t *u_bin_vec; int_t pbin; bool u_vec_has_NA; bool u_bin_vec_has_NA; real_t *B; int_t n; real_t *C; real_t *Cb; real_t *Xa; int_t *ixB; real_t *weight; size_t nnz; real_t *Xa_dense; real_t *buffer_real_t; real_t lam; real_t w_main; real_t w_user; real_t lam_last; } data_factors_fun_grad;
real_t wrapper_factors_fun_grad ( void *instance, real_t *x, real_t *g, const size_t n, const real_t step );
int_t collective_factors_lbfgs ( real_t *restrict a_vec, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t *restrict u_vec, int_t p, int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec, real_t *restrict u_bin_vec, int_t pbin, bool u_vec_has_NA, bool u_bin_vec_has_NA, real_t *restrict B, int_t n, real_t *restrict C, real_t *restrict Cb, real_t *restrict Xa, int_t ixB[], real_t *restrict weight, size_t nnz, real_t *restrict Xa_dense, real_t *restrict buffer_real_t, real_t lam, real_t w_main, real_t w_user, real_t lam_last );
/* Closed-form (Cholesky) solve for one block of factors; declaration
   continues onto the next line of the file. */
void collective_closed_form_block ( real_t *restrict a_vec, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t *restrict Xa_dense, real_t *restrict Xa, int_t ixB[], size_t nnz, int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec, real_t *restrict u_vec, bool NA_as_zero_X, bool NA_as_zero_U, real_t *restrict B, int_t n, int_t ldb, real_t *restrict C, int_t p, real_t *restrict Bi, int_t k_main_i, bool add_implicit_features, real_t *restrict Xones, int_t incXones, real_t *restrict weight, real_t lam, real_t w_user, real_t w_implicit, real_t lam_last, real_t l1_lam, real_t l1_lam_bias, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t wsum, real_t *restrict precomputedBtB, int_t cnt_NA_x, real_t *restrict precomputedCtCw, int_t cnt_NA_u, real_t *restrict precomputedBeTBeChol, int_t n_BtB, real_t *restrict precomputedBiTBi, bool add_X, bool add_U, bool use_cg, int_t max_cg_steps,/* <-
'cg' should not be used for new data*/ bool nonneg, int_t max_cd_steps, real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob, real_t *restrict bias_CtU, real_t *restrict buffer_real_t );
/* NOTE(review): the tokens above complete the collective_closed_form_block
   declaration (and a comment) begun on the previous line of the file; code
   tokens in this region are untouched — only comments/layout were changed. */
void collective_closed_form_block_implicit ( real_t *restrict a_vec, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t *restrict B, int_t n, real_t *restrict C, int_t p, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict u_vec, int_t cnt_NA_u, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, bool NA_as_zero_U, real_t lam, real_t l1_lam, real_t w_user, real_t *restrict bias_CtU, real_t *restrict precomputedBeTBe, real_t *restrict precomputedBtB, /* for cg, should NOT have lambda added */ real_t *restrict precomputedBeTBeChol, real_t *restrict precomputedCtCw, bool add_U, bool shapes_match, bool use_cg, int_t max_cg_steps,/* <- 'cg' should not be used for new data*/ bool nonneg, int_t max_cd_steps, real_t *restrict buffer_real_t );
/* Conjugate-gradient counterparts of the closed-form block solvers. */
void collective_block_cg ( real_t *restrict a_vec, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t *restrict Xa_dense, real_t *restrict Xa, int_t ixB[], size_t nnz, int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec, real_t *restrict u_vec, bool NA_as_zero_X, bool NA_as_zero_U, real_t *restrict B, int_t n, int_t ldb, real_t *restrict C, int_t p, bool add_implicit_features, real_t *restrict Xones, int_t incXones, real_t * restrict Bi, real_t *restrict precomputedBiTBi, int_t k_main_i, real_t *restrict weight, real_t lam, real_t w_user, real_t w_implicit, real_t lam_last, int_t cnt_NA_x, int_t cnt_NA_u, real_t *restrict precomputedBtB, real_t *restrict precomputedCtC, /* should NOT be multiplied by 'w_user' */ int_t max_cg_steps, real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob, real_t *restrict bias_CtU, real_t *restrict buffer_real_t );
void collective_block_cg_implicit ( real_t *restrict a_vec, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t *restrict Xa, int_t ixB[], size_t nnz, int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec, real_t *restrict u_vec, bool NA_as_zero_U, real_t *restrict B, int_t n, real_t *restrict C, int_t p, real_t lam, real_t w_user, int_t cnt_NA_u, int_t max_cg_steps, real_t *restrict bias_CtU, real_t *restrict precomputedBtB, real_t *restrict precomputedCtC, /* should NOT be multiplied by weight */ real_t *restrict buffer_real_t );
/* ALS half-step for the collective implicit model: optimize A given fixed B
   and side-info factors C; 'filled_*' output flags report which precomputed
   matrices were populated. */
void optimizeA_collective_implicit ( real_t *restrict A, real_t *restrict B, real_t *restrict C, int_t m, int_t m_u, int_t n, int_t p, int_t k, int_t k_main, int_t k_user, int_t k_item, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict U, int_t cnt_NA_u[], real_t *restrict U_colmeans, bool full_dense_u, bool near_dense_u, bool NA_as_zero_U, real_t lam, real_t l1_lam, real_t w_user, int nthreads, bool use_cg, int_t max_cg_steps, bool nonneg, int_t max_cd_steps, real_t *restrict precomputedBtB, /* will not have lambda with CG */ real_t *restrict precomputedBeTBe, real_t *restrict precomputedBeTBeChol, real_t *restrict precomputedCtC, real_t *restrict precomputedCtUbias, bool *filled_BeTBe, bool *filled_BeTBeChol, bool *filled_CtC, bool *filled_CtUbias, real_t *restrict buffer_real_t );
/* Factor computation for new entities: "cold" = side info only, "warm" = with
   observed interactions 'Xa'. */
int_t collective_factors_cold ( real_t *restrict a_vec, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict u_bin_vec, int_t pbin, real_t *restrict C, real_t *restrict Cb, real_t *restrict TransCtCinvCt, real_t *restrict CtCw, real_t *restrict col_means, real_t *restrict CtUbias, int_t k, int_t k_user, int_t k_main, real_t lam, real_t l1_lam, real_t w_main, real_t w_user, bool scale_lam_sideinfo, bool NA_as_zero_U, bool nonneg );
int_t collective_factors_cold_implicit ( real_t *restrict a_vec, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict B, int_t n, real_t *restrict C, real_t *restrict BeTBe, real_t *restrict BtB, real_t *restrict BeTBeChol, real_t *restrict col_means, real_t *restrict CtUbias, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t l1_lam, real_t w_main, real_t w_user, real_t w_main_multiplier, bool NA_as_zero_U, bool nonneg );
int_t collective_factors_warm ( real_t *restrict a_vec, real_t *restrict a_bias, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict u_bin_vec, int_t pbin, real_t *restrict C, real_t *restrict Cb, real_t glob_mean, real_t *restrict biasB, real_t *restrict col_means, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict Xa_dense, int_t n, real_t *restrict weight, real_t *restrict B, real_t *restrict Bi, bool add_implicit_features, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t w_main, real_t w_user, real_t w_implicit,real_t lam_bias, real_t l1_lam, real_t l1_lam_bias, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, int_t n_max, bool include_all_X, real_t *restrict TransBtBinvBt, real_t *restrict BtXbias, real_t *restrict BtB, real_t *restrict BeTBeChol, real_t *restrict BiTBi, real_t *restrict CtCw, real_t *restrict CtUbias, bool NA_as_zero_U, bool NA_as_zero_X, bool nonneg, real_t *restrict B_plus_bias );
int_t collective_factors_warm_implicit ( real_t *restrict a_vec, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, bool NA_as_zero_U, bool nonneg, real_t *restrict col_means, real_t *restrict B, int_t n, real_t *restrict C, real_t *restrict Xa, int_t ixB[], size_t nnz, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user, real_t w_main_multiplier, real_t *restrict BeTBe, real_t *restrict BtB, real_t *restrict BeTBeChol, real_t *restrict CtUbias );
/* Objective/gradient for the collective model's A matrix, with its argument
   bundle and callback for the (instance, x, g, n, step) interface. */
real_t fun_grad_A_collective ( real_t *restrict A, real_t *restrict g_A, real_t *restrict B, real_t *restrict C, int_t m, int_t m_u, int_t n, int_t p, int_t k, int_t k_main, int_t k_user, int_t k_item, int_t padding, real_t *restrict Xfull, bool full_dense, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, real_t *restrict weight, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict U, bool full_dense_u, real_t lam, real_t w_main, real_t w_user, real_t lam_last, bool do_B, int nthreads, real_t *restrict buffer_real_t );
typedef struct data_fun_grad_Adense_col { real_t *B; real_t *C; int_t m; int_t m_u; int_t n; int_t p; int_t k; int_t k_main; int_t k_user; int_t k_item; int_t padding; real_t *Xfull; bool full_dense; size_t *Xcsr_p; int_t *Xcsr_i; real_t *Xcsr; real_t *weight; size_t *U_csr_p; int_t *U_csr_i; real_t *U_csr; real_t *U; bool full_dense_u; real_t lam; real_t w_main; real_t w_user; real_t lam_last; bool do_B; int nthreads; real_t *buffer_real_t; } data_fun_grad_Adense_col;
real_t wrapper_fun_grad_Adense_col ( void *instance, real_t *x, real_t *g, const size_t n, const real_t step );
/* Workspace-size queries for the collective optimizeA routines; the second
   declaration continues onto the next line of the file. */
size_t buffer_size_optimizeA_collective ( size_t m, size_t m_u, size_t n, size_t p, size_t k, size_t k_main, size_t k_user, bool full_dense, bool near_dense, bool some_full, bool do_B, bool has_dense, bool has_sparse, bool has_weights, bool NA_as_zero_X, bool has_dense_U, bool has_sparse_U, bool full_dense_u, bool near_dense_u, bool some_full_u, bool NA_as_zero_U, bool add_implicit_features, size_t k_main_i, size_t nthreads, bool use_cg, bool finalize_chol, bool nonneg, bool has_l1, bool keep_precomputed, bool pass_allocated_BtB, bool pass_allocated_CtCw, bool pass_allocated_BeTBeChol, bool pass_allocated_BiTBi );
size_t buffer_size_optimizeA_collective_implicit ( size_t m, size_t m_u, size_t p, size_t k, size_t k_main, size_t k_user, bool has_sparse_U, bool NA_as_zero_U, size_t nthreads, bool use_cg, bool nonneg, bool has_l1, bool pass_allocated_BtB, bool pass_allocated_BeTBe, bool pass_allocated_BeTBeChol, bool
pass_allocated_CtC, bool finalize_chol );
/* NOTE(review): the tokens above complete the
   buffer_size_optimizeA_collective_implicit declaration begun on the previous
   line of the file; code tokens in this region are untouched — only
   comments/layout were changed. */
/* ALS half-step for the collective explicit model. */
void optimizeA_collective ( real_t *restrict A, int_t lda, real_t *restrict B, int_t ldb, real_t *restrict C, real_t *restrict Bi, int_t m, int_t m_u, int_t n, int_t p, int_t k, int_t k_main, int_t k_user, int_t k_item, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, real_t *restrict Xfull, int_t ldX, bool full_dense, bool near_dense, bool some_full, int_t cnt_NA_x[], real_t *restrict weight, bool NA_as_zero_X, real_t *restrict Xones, int_t k_main_i, int_t ldXones, bool add_implicit_features, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict U, int_t cnt_NA_u[], real_t *restrict U_colmeans, bool full_dense_u, bool near_dense_u, bool some_full_u, bool NA_as_zero_U, real_t lam, real_t w_user, real_t w_implicit, real_t lam_last, real_t l1_lam, real_t l1_lam_bias, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t *restrict wsumA, bool do_B, int nthreads, bool use_cg, int_t max_cg_steps, bool nonneg, int_t max_cd_steps, real_t *restrict bias_restore, real_t *restrict bias_BtX, real_t *restrict bias_X, real_t bias_X_glob, bool keep_precomputed, real_t *restrict precomputedBtB, real_t *restrict precomputedCtCw, real_t *restrict precomputedBeTBeChol, real_t *restrict precomputedBiTBi, real_t *restrict precomputedCtUbias, bool *filled_BtB, bool *filled_CtCw, bool *filled_BeTBeChol, bool *filled_CtUbias, bool *CtC_is_scaled, real_t *restrict buffer_real_t );
/* Builders for the precomputed Gram-style matrices used by the solvers. */
void build_BeTBe ( real_t *restrict bufferBeTBe, real_t *restrict B, int_t ldb, real_t *restrict C, int_t k, int_t k_user, int_t k_main, int_t k_item, int_t n, int_t p, real_t lam, real_t w_user );
void build_BtB_CtC ( real_t *restrict BtB, real_t *restrict CtC, real_t *restrict B, int_t n, int_t ldb, real_t *restrict C, int_t p, int_t k, int_t k_user, int_t k_main, int_t k_item, real_t w_user, real_t *restrict weight );
void build_XBw ( real_t *restrict A, int_t lda, real_t *restrict B, int_t ldb, real_t *restrict Xfull, int_t ldX, int_t m, int_t n, int_t k, real_t w, bool do_B, bool overwrite );
/* Input preprocessing/conversion helpers; 'modified_*' output flags report
   in-place edits to the caller's data. */
int_t preprocess_vec ( real_t *restrict *vec_full_, int_t n, int_t *restrict ix_vec, real_t *restrict *vec_sp_, size_t nnz, real_t glob_mean, real_t lam, real_t *restrict col_means, real_t *restrict vec_mean, int_t *restrict cnt_NA, bool *modified_vec, bool *modified_vec_sp );
int_t convert_sparse_X ( int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, size_t **Xcsr_p, int_t **Xcsr_i, real_t *restrict *Xcsr, size_t **Xcsc_p, int_t **Xcsc_i, real_t *restrict *Xcsc, real_t *restrict weight, real_t *restrict *weightR, real_t *restrict *weightC, int_t m, int_t n, int nthreads );
int_t preprocess_sideinfo_matrix ( real_t *restrict *U_, int_t m_u, int_t p, int_t U_row[], int_t U_col[], real_t *restrict *U_sp_, size_t nnz_U, real_t *U_colmeans, real_t *restrict *Utrans, size_t **U_csr_p, int_t **U_csr_i, real_t *restrict *U_csr, size_t **U_csc_p, int_t **U_csc_i, real_t *restrict *U_csc, int_t *restrict *cnt_NA_u_byrow, int_t *restrict *cnt_NA_u_bycol, bool *restrict full_dense_u, bool *restrict near_dense_u_row, bool *restrict near_dense_u_col, bool *restrict some_full_u_row, bool *restrict some_full_u_col, bool NA_as_zero_U, bool nonneg, int nthreads, bool *modified_U, bool *modified_Usp );
/* Callback + argument bundle for the full collective objective/gradient. */
real_t wrapper_collective_fun_grad ( void *instance, real_t *x, real_t *g, const size_t n, const real_t step );
typedef struct data_collective_fun_grad { int_t m; int_t n; int_t k; int_t *ixA; int_t *ixB; real_t *X; size_t nnz; real_t *Xfull; size_t *Xcsr_p; int_t *Xcsr_i; real_t *Xcsr; size_t *Xcsc_p; int_t *Xcsc_i; real_t *Xcsc; real_t *weight; real_t *weightR; real_t *weightC; bool user_bias; bool item_bias; real_t lam; real_t *lam_unique; real_t *U; int_t m_u; int_t p; bool U_has_NA; real_t *II; int_t n_i; int_t q; bool I_has_NA; real_t *Ub; int_t m_ubin; int_t pbin; bool Ub_has_NA; real_t *Ib; int_t n_ibin; int_t qbin; bool Ib_has_NA; int_t *U_row; int_t *U_col; real_t *U_sp; size_t nnz_U; int_t *I_row; int_t *I_col; real_t *I_sp; size_t nnz_I; size_t *U_csr_p; int_t *U_csr_i; real_t *U_csr; size_t *U_csc_p; int_t *U_csc_i; real_t *U_csc; size_t *I_csr_p; int_t *I_csr_i; real_t *I_csr; size_t *I_csc_p; int_t *I_csc_i; real_t *I_csc; real_t *buffer_real_t; real_t *buffer_mt; int_t k_main; int_t k_user; int_t k_item; real_t w_main; real_t w_user; real_t w_item; int nthreads; int_t print_every; int_t nfev; int_t niter; } data_collective_fun_grad;
/* Full model fit via L-BFGS: internal worker and exported entry point. */
int_t fit_collective_explicit_lbfgs_internal ( real_t *restrict values, bool reset_values, real_t *restrict glob_mean, real_t *restrict U_colmeans, real_t *restrict I_colmeans, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, real_t *restrict weight, bool user_bias, bool item_bias, bool center, real_t lam, real_t *restrict lam_unique, real_t *restrict U, int_t m_u, int_t p, real_t *restrict II, int_t n_i, int_t q, real_t *restrict Ub, int_t m_ubin, int_t pbin, real_t *restrict Ib, int_t n_ibin, int_t qbin, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, int_t k_main, int_t k_user, int_t k_item, real_t w_main, real_t w_user, real_t w_item, int_t n_corr_pairs, size_t maxiter, int_t seed, int nthreads, bool prefer_onepass, bool verbose, int_t print_every, bool handle_interrupt, int_t *restrict niter, int_t *restrict nfev, real_t *restrict B_plus_bias );
CMFREC_EXPORTABLE int_t fit_collective_explicit_lbfgs ( real_t *restrict biasA, real_t *restrict biasB, real_t *restrict A, real_t *restrict B, real_t *restrict C, real_t *restrict Cb, real_t *restrict D, real_t *restrict Db, bool reset_values, int_t seed, real_t *restrict glob_mean, real_t *restrict U_colmeans, real_t *restrict I_colmeans, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, real_t *restrict weight, bool user_bias, bool item_bias, bool center, real_t lam, real_t *restrict lam_unique, real_t *restrict U, int_t m_u, int_t p, real_t *restrict II, int_t n_i, int_t q, real_t *restrict Ub, int_t m_ubin, int_t pbin, real_t *restrict Ib, int_t n_ibin, int_t qbin, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, int_t k_main, int_t k_user, int_t k_item, real_t w_main, real_t w_user, real_t w_item, int_t n_corr_pairs, size_t maxiter, int nthreads, bool prefer_onepass, bool verbose, int_t print_every, bool handle_interrupt, int_t *restrict niter, int_t *restrict nfev, bool precompute_for_predictions, bool include_all_X, real_t *restrict B_plus_bias, real_t *restrict precomputedBtB, real_t *restrict precomputedTransBtBinvBt, real_t *restrict precomputedBeTBeChol, real_t *restrict precomputedTransCtCinvCt, real_t *restrict precomputedCtCw );
/* ALS fit of the collective explicit model; declaration continues onto the
   next line of the file. */
CMFREC_EXPORTABLE int_t fit_collective_explicit_als ( real_t *restrict biasA, real_t *restrict biasB, real_t *restrict A, real_t *restrict B, real_t *restrict C, real_t *restrict D, real_t *restrict Ai, real_t *restrict Bi, bool add_implicit_features, bool reset_values, int_t seed, real_t *restrict glob_mean, real_t *restrict U_colmeans, real_t *restrict I_colmeans, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, real_t *restrict weight, bool user_bias, bool item_bias, bool center, real_t lam, real_t *restrict lam_unique, real_t l1_lam, real_t *restrict l1_lam_unique, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t *restrict scaling_biasA, real_t *restrict scaling_biasB, real_t *restrict U, int_t m_u, int_t p, real_t *restrict II, int_t n_i, int_t q, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, bool NA_as_zero_X, bool NA_as_zero_U, bool NA_as_zero_I, int_t k_main, int_t k_user, int_t k_item, real_t w_main, real_t w_user, real_t w_item,
real_t w_implicit, int_t niter, int nthreads, bool verbose, bool handle_interrupt, bool use_cg, int_t max_cg_steps, bool finalize_chol, bool nonneg, int_t max_cd_steps, bool nonneg_C, bool nonneg_D, bool precompute_for_predictions, bool include_all_X, real_t *restrict B_plus_bias, real_t *restrict precomputedBtB, real_t *restrict precomputedTransBtBinvBt, real_t *restrict precomputedBtXbias, real_t *restrict precomputedBeTBeChol, real_t *restrict precomputedBiTBi, real_t *restrict precomputedTransCtCinvCt, real_t *restrict precomputedCtCw, real_t *restrict precomputedCtUbias );
/* NOTE(review): the parameters above complete the fit_collective_explicit_als
   declaration begun on the previous line of the file; code tokens in this
   region are untouched — only comments/layout were changed. */
CMFREC_EXPORTABLE int_t fit_collective_implicit_als ( real_t *restrict A, real_t *restrict B, real_t *restrict C, real_t *restrict D, bool reset_values, int_t seed, real_t *restrict U_colmeans, real_t *restrict I_colmeans, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t lam, real_t *restrict lam_unique, real_t l1_lam, real_t *restrict l1_lam_unique, real_t *restrict U, int_t m_u, int_t p, real_t *restrict II, int_t n_i, int_t q, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, bool NA_as_zero_U, bool NA_as_zero_I, int_t k_main, int_t k_user, int_t k_item, real_t w_main, real_t w_user, real_t w_item, real_t *restrict w_main_multiplier, real_t alpha, bool adjust_weight, bool apply_log_transf, int_t niter, int nthreads, bool verbose, bool handle_interrupt, bool use_cg, int_t max_cg_steps, bool finalize_chol, bool nonneg, int_t max_cd_steps, bool nonneg_C, bool nonneg_D, bool precompute_for_predictions, real_t *restrict precomputedBtB, real_t *restrict precomputedBeTBe, real_t *restrict precomputedBeTBeChol, real_t *restrict precomputedCtUbias );
/* Fill the caller-allocated precomputed matrices used to speed up later
   factor/prediction calls. */
CMFREC_EXPORTABLE int_t precompute_collective_explicit ( real_t *restrict B, int_t n, int_t n_max, bool include_all_X, real_t *restrict C, int_t p, real_t *restrict Bi, bool add_implicit_features, real_t *restrict biasB, real_t glob_mean, bool NA_as_zero_X, real_t *restrict U_colmeans, bool NA_as_zero_U, int_t k, int_t k_user, int_t k_item, int_t k_main, bool user_bias, bool nonneg, real_t lam, real_t *restrict lam_unique, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t scaling_biasA, real_t w_main, real_t w_user, real_t w_implicit, real_t *restrict B_plus_bias, real_t *restrict BtB, real_t *restrict TransBtBinvBt, real_t *restrict BtXbias, real_t *restrict BeTBeChol, real_t *restrict BiTBi, real_t *restrict TransCtCinvCt, real_t *restrict CtCw, real_t *restrict CtUbias );
CMFREC_EXPORTABLE int_t precompute_collective_implicit ( real_t *restrict B, int_t n, real_t *restrict C, int_t p, real_t *restrict U_colmeans, bool NA_as_zero_U, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t w_main, real_t w_user, real_t w_main_multiplier, bool nonneg, bool extra_precision, real_t *restrict BtB, real_t *restrict BeTBe, real_t *restrict BeTBeChol, real_t *restrict CtUbias );
/* Exported factor computation for new data: single-vector and batched
   ("_multiple") variants, explicit and implicit models. */
CMFREC_EXPORTABLE int_t factors_collective_explicit_single ( real_t *restrict a_vec, real_t *restrict a_bias, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict u_bin_vec, int_t pbin, bool NA_as_zero_U, bool NA_as_zero_X, bool nonneg, real_t *restrict C, real_t *restrict Cb, real_t glob_mean, real_t *restrict biasB, real_t *restrict U_colmeans, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict Xa_dense, int_t n, real_t *restrict weight, real_t *restrict B, real_t *restrict Bi, bool add_implicit_features, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t *restrict lam_unique, real_t l1_lam, real_t *restrict l1_lam_unique, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t scaling_biasA, real_t w_main, real_t w_user, real_t w_implicit, int_t n_max, bool include_all_X, real_t *restrict BtB, real_t *restrict TransBtBinvBt, real_t *restrict BtXbias, real_t *restrict BeTBeChol, real_t *restrict BiTBi, real_t *restrict CtCw, real_t *restrict TransCtCinvCt, real_t *restrict CtUbias, real_t *restrict B_plus_bias );
CMFREC_EXPORTABLE int_t factors_collective_implicit_single ( real_t *restrict a_vec, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, bool NA_as_zero_U, bool nonneg, real_t *restrict U_colmeans, real_t *restrict B, int_t n, real_t *restrict C, real_t *restrict Xa, int_t ixB[], size_t nnz, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user, real_t w_main_multiplier, bool apply_log_transf, real_t *restrict BeTBe, real_t *restrict BtB, real_t *restrict BeTBeChol, real_t *restrict CtUbias );
CMFREC_EXPORTABLE int_t factors_collective_explicit_multiple ( real_t *restrict A, real_t *restrict biasA, int_t m, real_t *restrict U, int_t m_u, int_t p, bool NA_as_zero_U, bool NA_as_zero_X, bool nonneg, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict Ub, int_t m_ubin, int_t pbin, real_t *restrict C, real_t *restrict Cb, real_t glob_mean, real_t *restrict biasB, real_t *restrict U_colmeans, real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz, size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr, real_t *restrict Xfull, int_t n, real_t *restrict weight, real_t *restrict B, real_t *restrict Bi, bool add_implicit_features, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t *restrict lam_unique, real_t l1_lam, real_t *restrict l1_lam_unique, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t scaling_biasA, real_t w_main, real_t w_user, real_t w_implicit, int_t n_max, bool include_all_X, real_t *restrict BtB, real_t *restrict TransBtBinvBt, real_t *restrict BtXbias, real_t *restrict BeTBeChol, real_t *restrict BiTBi, real_t *restrict TransCtCinvCt, real_t *restrict CtCw, real_t *restrict CtUbias, real_t *restrict B_plus_bias, int nthreads );
CMFREC_EXPORTABLE int_t factors_collective_implicit_multiple ( real_t *restrict A, int_t m, real_t *restrict U, int_t m_u, int_t p, bool NA_as_zero_U, bool nonneg, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz, size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr, real_t *restrict B, int_t n, real_t *restrict C, real_t *restrict U_colmeans, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user, real_t w_main_multiplier, bool apply_log_transf, real_t *restrict BeTBe, real_t *restrict BtB, real_t *restrict BeTBeChol, real_t *restrict CtUbias, int nthreads );
CMFREC_EXPORTABLE int_t impute_X_collective_explicit ( int_t m, bool user_bias, real_t *restrict U, int_t m_u, int_t p, bool NA_as_zero_U, bool nonneg, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict Ub, int_t m_ubin, int_t pbin, real_t *restrict C, real_t *restrict Cb, real_t glob_mean, real_t *restrict biasB, real_t *restrict U_colmeans, real_t *restrict Xfull, int_t n, real_t *restrict weight, real_t *restrict B, real_t *restrict Bi, bool add_implicit_features, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t *restrict lam_unique, real_t l1_lam, real_t *restrict l1_lam_unique, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t scaling_biasA, real_t w_main, real_t w_user, real_t w_implicit, int_t n_max, bool include_all_X, real_t *restrict BtB, real_t *restrict TransBtBinvBt, real_t *restrict BeTBeChol, real_t *restrict BiTBi, real_t *restrict TransCtCinvCt, real_t *restrict CtCw, real_t *restrict CtUbias, real_t *restrict B_plus_bias, int nthreads );
/* Top-N recommendation entry points; the last declaration below continues
   past the end of this chunk. */
CMFREC_EXPORTABLE int_t topN_old_collective_explicit ( real_t *restrict a_vec, real_t a_bias, real_t *restrict A, real_t *restrict biasA, int_t row_index, real_t *restrict B, real_t *restrict biasB, real_t glob_mean, int_t k, int_t k_user, int_t k_item, int_t k_main, int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int_t n, int_t n_max, bool include_all_X, int nthreads );
CMFREC_EXPORTABLE int_t topN_old_collective_implicit ( real_t *restrict a_vec, real_t *restrict A, int_t row_index, real_t *restrict B, int_t k, int_t k_user, int_t k_item, int_t k_main, int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int_t n, int nthreads );
CMFREC_EXPORTABLE int_t topN_new_collective_explicit ( /* inputs for the factors */ bool user_bias, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict u_bin_vec, int_t pbin, bool NA_as_zero_U, bool NA_as_zero_X, bool nonneg, real_t *restrict C, real_t *restrict Cb, real_t glob_mean, real_t *restrict biasB, real_t *restrict U_colmeans, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict Xa_dense, int_t n, real_t *restrict weight, real_t *restrict B, real_t *restrict Bi, bool add_implicit_features, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t *restrict lam_unique, real_t l1_lam, real_t *restrict l1_lam_unique, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t scaling_biasA, real_t w_main, real_t w_user, real_t w_implicit, int_t n_max, bool include_all_X, real_t *restrict BtB, real_t *restrict TransBtBinvBt, real_t *restrict BtXbias, real_t *restrict BeTBeChol, real_t *restrict BiTBi, real_t *restrict CtCw, real_t *restrict TransCtCinvCt, real_t *restrict CtUbias, real_t *restrict B_plus_bias, /* inputs for topN */ int_t *restrict
include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int nthreads ); CMFREC_EXPORTABLE int_t topN_new_collective_implicit ( /* inputs for the factors */ int_t n, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, bool NA_as_zero_U, bool nonneg, real_t *restrict U_colmeans, real_t *restrict B, real_t *restrict C, real_t *restrict Xa, int_t ixB[], size_t nnz, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user, real_t w_main_multiplier, bool apply_log_transf, real_t *restrict BeTBe, real_t *restrict BtB, real_t *restrict BeTBeChol, real_t *restrict CtUbias, /* inputs for topN */ int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int nthreads ); CMFREC_EXPORTABLE int_t predict_X_old_collective_explicit ( int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict, real_t *restrict A, real_t *restrict biasA, real_t *restrict B, real_t *restrict biasB, real_t glob_mean, int_t k, int_t k_user, int_t k_item, int_t k_main, int_t m, int_t n_max, int nthreads ); CMFREC_EXPORTABLE int_t predict_X_old_collective_implicit ( int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict, real_t *restrict A, real_t *restrict B, int_t k, int_t k_user, int_t k_item, int_t k_main, int_t m, int_t n, int nthreads ); CMFREC_EXPORTABLE int_t predict_X_new_collective_explicit ( /* inputs for predictions */ int_t m_new, int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict, int nthreads, /* inputs for factors */ bool user_bias, real_t *restrict U, int_t m_u, int_t p, bool NA_as_zero_U, bool NA_as_zero_X, bool nonneg, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t 
*restrict Ub, int_t m_ubin, int_t pbin, real_t *restrict C, real_t *restrict Cb, real_t glob_mean, real_t *restrict biasB, real_t *restrict U_colmeans, real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz, size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr, real_t *restrict Xfull, int_t n, real_t *restrict weight, real_t *restrict B, real_t *restrict Bi, bool add_implicit_features, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t *restrict lam_unique, real_t l1_lam, real_t *restrict l1_lam_unique, bool scale_lam, bool scale_lam_sideinfo, bool scale_bias_const, real_t scaling_biasA, real_t w_main, real_t w_user, real_t w_implicit, int_t n_max, bool include_all_X, real_t *restrict BtB, real_t *restrict TransBtBinvBt, real_t *restrict BtXbias, real_t *restrict BeTBeChol, real_t *restrict BiTBi, real_t *restrict TransCtCinvCt, real_t *restrict CtCw, real_t *restrict CtUbias, real_t *restrict B_plus_bias ); CMFREC_EXPORTABLE int_t predict_X_new_collective_implicit ( /* inputs for predictions */ int_t m_new, int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict, int nthreads, /* inputs for factors */ real_t *restrict U, int_t m_u, int_t p, bool NA_as_zero_U, bool nonneg, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz, size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr, real_t *restrict B, int_t n, real_t *restrict C, real_t *restrict U_colmeans, int_t k, int_t k_user, int_t k_item, int_t k_main, real_t lam, real_t l1_lam, real_t alpha, real_t w_main, real_t w_user, real_t w_main_multiplier, bool apply_log_transf, real_t *restrict BeTBe, real_t *restrict BtB, real_t *restrict BeTBeChol, real_t *restrict CtUbias ); /* offsets.c */ real_t offsets_fun_grad ( real_t *restrict values, real_t *restrict grad, int_t ixA[], int_t ixB[], real_t *restrict X, size_t 
nnz, int_t m, int_t n, int_t k, real_t *restrict Xfull, bool full_dense, size_t Xcsr_p[], int_t Xcsr_i[], real_t *restrict Xcsr, size_t Xcsc_p[], int_t Xcsc_i[], real_t *restrict Xcsc, real_t *restrict weight, real_t *restrict weightR, real_t *restrict weightC, bool user_bias, bool item_bias, bool add_intercepts, real_t lam, real_t *restrict lam_unique, real_t *restrict U, int_t p, real_t *restrict II, int_t q, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, size_t U_csc_p[], int_t U_csc_i[], real_t *restrict U_csc, size_t I_csr_p[], int_t I_csr_i[], real_t *restrict I_csr, size_t I_csc_p[], int_t I_csc_i[], real_t *restrict I_csc, int_t k_main, int_t k_sec, real_t w_user, real_t w_item, int nthreads, real_t *restrict buffer_real_t, real_t *restrict buffer_mt ); void construct_Am ( real_t *restrict Am, real_t *restrict A, real_t *restrict C, real_t *restrict C_bias, bool add_intercepts, real_t *restrict U, int_t m, int_t p, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, int_t k, int_t k_sec, int_t k_main, real_t w_user, int nthreads ); void assign_gradients ( real_t *restrict bufferA, real_t *restrict g_A, real_t *restrict g_C, bool add_intercepts, real_t *restrict g_C_bias, real_t *restrict U, size_t U_csc_p[], int_t U_csc_i[], real_t *restrict U_csc, int_t m, int_t p, int_t k, int_t k_sec, int_t k_main, real_t w_user, int nthreads ); int_t offsets_factors_cold ( real_t *restrict a_vec, real_t *restrict u_vec, int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec, real_t *restrict C, int_t p, real_t *restrict C_bias, int_t k, int_t k_sec, int_t k_main, real_t w_user ); int_t offsets_factors_warm ( real_t *restrict a_vec, real_t *restrict a_bias, real_t *restrict u_vec, int_t u_vec_ixB[], real_t *restrict u_vec_sp, size_t nnz_u_vec, int_t ixB[], real_t *restrict Xa, size_t nnz, real_t *restrict Xa_dense, int_t n, real_t *restrict weight, real_t *restrict Bm, real_t *restrict C, real_t *restrict C_bias, real_t glob_mean, real_t 
*restrict biasB, int_t k, int_t k_sec, int_t k_main, int_t p, real_t w_user, real_t lam, bool exact, real_t lam_bias, bool implicit, real_t alpha, real_t *restrict precomputedTransBtBinvBt, real_t *restrict precomputedBtBw, real_t *restrict output_a, real_t *restrict Bm_plus_bias ); int_t precompute_offsets_both ( real_t *restrict A, int_t m, real_t *restrict B, int_t n, real_t *restrict C, int_t p, real_t *restrict D, int_t q, real_t *restrict C_bias, real_t *restrict D_bias, real_t *restrict biasB, real_t glob_mean, bool NA_as_zero_X, bool user_bias, bool add_intercepts, bool implicit, int_t k, int_t k_main, int_t k_sec, real_t lam, real_t *restrict lam_unique, real_t w_user, real_t w_item, real_t *restrict U, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict II, size_t I_csr_p[], int_t I_csr_i[], real_t *restrict I_csr, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, real_t *restrict Am, real_t *restrict Bm, real_t *restrict Bm_plus_bias, real_t *restrict BtB, real_t *restrict TransBtBinvBt ); CMFREC_EXPORTABLE int_t precompute_offsets_explicit ( real_t *restrict A, int_t m, real_t *restrict B, int_t n, real_t *restrict C, int_t p, real_t *restrict D, int_t q, real_t *restrict C_bias, real_t *restrict D_bias, bool user_bias, bool add_intercepts, int_t k, int_t k_main, int_t k_sec, real_t lam, real_t *restrict lam_unique, real_t w_user, real_t w_item, real_t *restrict U, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict II, size_t I_csr_p[], int_t I_csr_i[], real_t *restrict I_csr, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, real_t *restrict Am, real_t *restrict Bm, real_t *restrict Bm_plus_bias, real_t *restrict BtB, real_t *restrict TransBtBinvBt ); CMFREC_EXPORTABLE int_t precompute_offsets_implicit ( real_t *restrict A, int_t m, real_t 
*restrict B, int_t n, real_t *restrict C, int_t p, real_t *restrict D, int_t q, real_t *restrict C_bias, real_t *restrict D_bias, bool add_intercepts, int_t k, real_t lam, real_t *restrict U, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict II, size_t I_csr_p[], int_t I_csr_i[], real_t *restrict I_csr, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, real_t *restrict Am, real_t *restrict Bm, real_t *restrict BtB ); typedef struct data_offsets_fun_grad { int_t *ixA; int_t *ixB; real_t *X; size_t nnz; int_t m; int_t n; int_t k; real_t *Xfull; bool full_dense; size_t *Xcsr_p; int_t *Xcsr_i; real_t *Xcsr; size_t *Xcsc_p; int_t *Xcsc_i; real_t *Xcsc; real_t *weight; real_t *weightR; real_t *weightC; bool user_bias; bool item_bias; bool add_intercepts; real_t lam; real_t *lam_unique; real_t *U; int_t p; real_t *II; int_t q; size_t *U_csr_p; int_t *U_csr_i; real_t *U_csr; size_t *U_csc_p; int_t *U_csc_i; real_t *U_csc; size_t *I_csr_p; int_t *I_csr_i; real_t *I_csr; size_t *I_csc_p; int_t *I_csc_i; real_t *I_csc; int_t k_main; int_t k_sec; real_t w_user; real_t w_item; int nthreads; real_t *buffer_real_t; real_t *buffer_mt; int_t print_every; int_t nfev; int_t niter; } data_offsets_fun_grad; real_t wrapper_offsets_fun_grad ( void *instance, real_t *x, real_t *g, const size_t n, const real_t step ); int_t fit_offsets_explicit_lbfgs_internal ( real_t *restrict values, bool reset_values, real_t *restrict glob_mean, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, real_t *restrict weight, bool user_bias, bool item_bias, bool center, bool add_intercepts, real_t lam, real_t *restrict lam_unique, real_t *restrict U, int_t p, real_t *restrict II, int_t q, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, int_t k_main, int_t 
k_sec, real_t w_user, real_t w_item, int_t n_corr_pairs, size_t maxiter, int_t seed, int nthreads, bool prefer_onepass, bool verbose, int_t print_every, bool handle_interrupt, int_t *restrict niter, int_t *restrict nfev, real_t *restrict Am, real_t *restrict Bm, real_t *restrict Bm_plus_bias ); CMFREC_EXPORTABLE int_t fit_offsets_explicit_lbfgs ( real_t *restrict biasA, real_t *restrict biasB, real_t *restrict A, real_t *restrict B, real_t *restrict C, real_t *restrict C_bias, real_t *restrict D, real_t *restrict D_bias, bool reset_values, int_t seed, real_t *restrict glob_mean, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, real_t *restrict weight, bool user_bias, bool item_bias, bool center, bool add_intercepts, real_t lam, real_t *restrict lam_unique, real_t *restrict U, int_t p, real_t *restrict II, int_t q, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, int_t k_main, int_t k_sec, real_t w_user, real_t w_item, int_t n_corr_pairs, size_t maxiter, int nthreads, bool prefer_onepass, bool verbose, int_t print_every, bool handle_interrupt, int_t *restrict niter, int_t *restrict nfev, bool precompute_for_predictions, real_t *restrict Am, real_t *restrict Bm, real_t *restrict Bm_plus_bias, real_t *restrict precomputedBtB, real_t *restrict precomputedTransBtBinvBt ); int_t fit_offsets_als ( real_t *restrict biasA, real_t *restrict biasB, real_t *restrict A, real_t *restrict B, real_t *restrict C, real_t *restrict C_bias, real_t *restrict D, real_t *restrict D_bias, bool reset_values, int_t seed, real_t *restrict glob_mean, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, real_t *restrict weight, bool user_bias, bool item_bias, bool center, bool add_intercepts, real_t lam, real_t *restrict U, int_t p, real_t *restrict II, int_t q, bool implicit, bool NA_as_zero_X, 
real_t alpha, bool apply_log_transf, int_t niter, int nthreads, bool use_cg, int_t max_cg_steps, bool finalize_chol, bool verbose, bool handle_interrupt, bool precompute_for_predictions, real_t *restrict Am, real_t *restrict Bm, real_t *restrict Bm_plus_bias, real_t *restrict precomputedBtB, real_t *restrict precomputedTransBtBinvBt ); CMFREC_EXPORTABLE int_t fit_offsets_explicit_als ( real_t *restrict biasA, real_t *restrict biasB, real_t *restrict A, real_t *restrict B, real_t *restrict C, real_t *restrict C_bias, real_t *restrict D, real_t *restrict D_bias, bool reset_values, int_t seed, real_t *restrict glob_mean, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, real_t *restrict weight, bool user_bias, bool item_bias, bool center, bool add_intercepts, real_t lam, real_t *restrict U, int_t p, real_t *restrict II, int_t q, bool NA_as_zero_X, int_t niter, int nthreads, bool use_cg, int_t max_cg_steps, bool finalize_chol, bool verbose, bool handle_interrupt, bool precompute_for_predictions, real_t *restrict Am, real_t *restrict Bm, real_t *restrict Bm_plus_bias, real_t *restrict precomputedBtB, real_t *restrict precomputedTransBtBinvBt ); CMFREC_EXPORTABLE int_t fit_offsets_implicit_als ( real_t *restrict A, real_t *restrict B, real_t *restrict C, real_t *restrict C_bias, real_t *restrict D, real_t *restrict D_bias, bool reset_values, int_t seed, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, bool add_intercepts, real_t lam, real_t *restrict U, int_t p, real_t *restrict II, int_t q, real_t alpha, bool apply_log_transf, int_t niter, int nthreads, bool use_cg, int_t max_cg_steps, bool finalize_chol, bool verbose, bool handle_interrupt, bool precompute_for_predictions, real_t *restrict Am, real_t *restrict Bm, real_t *restrict precomputedBtB ); int_t matrix_content_based ( real_t *restrict Am_new, int_t m_new, int_t k, real_t *restrict U, int_t p, int_t U_row[], int_t 
U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict C, real_t *restrict C_bias, int nthreads ); CMFREC_EXPORTABLE int_t factors_offsets_explicit_single ( real_t *restrict a_vec, real_t *restrict a_bias, real_t *restrict output_a, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict Xa_dense, int_t n, real_t *restrict weight, real_t *restrict Bm, real_t *restrict C, real_t *restrict C_bias, real_t glob_mean, real_t *restrict biasB, int_t k, int_t k_sec, int_t k_main, real_t w_user, real_t lam, real_t *restrict lam_unique, bool exact, real_t *restrict precomputedTransBtBinvBt, real_t *restrict precomputedBtB, real_t *restrict Bm_plus_bias ); CMFREC_EXPORTABLE int_t factors_offsets_implicit_single ( real_t *restrict a_vec, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict Bm, real_t *restrict C, real_t *restrict C_bias, int_t k, int_t n, real_t lam, real_t alpha, bool apply_log_transf, real_t *restrict precomputedBtB, real_t *restrict output_a ); CMFREC_EXPORTABLE int_t factors_offsets_explicit_multiple ( real_t *restrict Am, real_t *restrict biasA, real_t *restrict A, int_t m, real_t *restrict U, int_t p, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz, size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr, real_t *restrict Xfull, int_t n, real_t *restrict weight, real_t *restrict Bm, real_t *restrict C, real_t *restrict C_bias, real_t glob_mean, real_t *restrict biasB, int_t k, int_t k_sec, int_t k_main, real_t w_user, real_t lam, real_t *restrict lam_unique, bool exact, real_t *restrict precomputedTransBtBinvBt, real_t *restrict 
precomputedBtB, real_t *restrict Bm_plus_bias, int nthreads ); CMFREC_EXPORTABLE int_t factors_offsets_implicit_multiple ( real_t *restrict Am, int_t m, real_t *restrict A, real_t *restrict U, int_t p, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz, size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr, real_t *restrict Bm, real_t *restrict C, real_t *restrict C_bias, int_t k, int_t n, real_t lam, real_t alpha, bool apply_log_transf, real_t *restrict precomputedBtB, int nthreads ); CMFREC_EXPORTABLE int_t topN_old_offsets_explicit ( real_t *restrict a_vec, real_t a_bias, real_t *restrict Am, real_t *restrict biasA, int_t row_index, real_t *restrict Bm, real_t *restrict biasB, real_t glob_mean, int_t k, int_t k_sec, int_t k_main, int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int_t n, int nthreads ); CMFREC_EXPORTABLE int_t topN_old_offsets_implicit ( real_t *restrict a_vec, real_t *restrict Am, int_t row_index, real_t *restrict Bm, int_t k, int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int_t n, int nthreads ); CMFREC_EXPORTABLE int_t topN_new_offsets_explicit ( /* inputs for factors */ bool user_bias, int_t n, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict Xa_dense, real_t *restrict weight, real_t *restrict Bm, real_t *restrict C, real_t *restrict C_bias, real_t glob_mean, real_t *restrict biasB, int_t k, int_t k_sec, int_t k_main, real_t w_user, real_t lam, real_t *restrict lam_unique, bool exact, real_t *restrict precomputedTransBtBinvBt, real_t *restrict precomputedBtB, real_t 
*restrict Bm_plus_bias, /* inputs for topN */ int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int nthreads ); CMFREC_EXPORTABLE int_t topN_new_offsets_implicit ( /* inputs for factors */ real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict Xa, int_t ixB[], size_t nnz, real_t *restrict Bm, real_t *restrict C, real_t *restrict C_bias, int_t k, real_t lam, real_t alpha, bool apply_log_transf, real_t *restrict precomputedBtB, /* inputs for topN */ int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int_t n, int nthreads ); CMFREC_EXPORTABLE int_t predict_X_old_offsets_explicit ( int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict, real_t *restrict Am, real_t *restrict biasA, real_t *restrict Bm, real_t *restrict biasB, real_t glob_mean, int_t k, int_t k_sec, int_t k_main, int_t m, int_t n, int nthreads ); CMFREC_EXPORTABLE int_t predict_X_old_offsets_implicit ( int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict, real_t *restrict Am, real_t *restrict Bm, int_t k, int_t m, int_t n, int nthreads ); CMFREC_EXPORTABLE int_t predict_X_new_offsets_explicit ( /* inputs for predictions */ int_t m_new, bool user_bias, int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict, int nthreads, /* inputs for factors */ real_t *restrict U, int_t p, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz, size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr, real_t *restrict Xfull, int_t n, /* <- 'n' MUST be passed */ real_t *restrict weight, real_t *restrict Bm, real_t *restrict C, real_t *restrict C_bias, real_t 
glob_mean, real_t *restrict biasB, int_t k, int_t k_sec, int_t k_main, real_t w_user, real_t lam, real_t *restrict lam_unique, bool exact, real_t *restrict precomputedTransBtBinvBt, real_t *restrict precomputedBtB, real_t *restrict Bm_plus_bias ); CMFREC_EXPORTABLE int_t predict_X_new_offsets_implicit ( /* inputs for predictions */ int_t m_new, int_t row[], int_t col[], real_t *restrict predicted, size_t n_predict, int_t n_orig, int nthreads, /* inputs for factors */ real_t *restrict U, int_t p, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict X, int_t ixA[], int_t ixB[], size_t nnz, size_t *restrict Xcsr_p, int_t *restrict Xcsr_i, real_t *restrict Xcsr, real_t *restrict Bm, real_t *restrict C, real_t *restrict C_bias, int_t k, real_t lam, real_t alpha, bool apply_log_transf, real_t *restrict precomputedBtB ); CMFREC_EXPORTABLE int_t fit_content_based_lbfgs ( real_t *restrict biasA, real_t *restrict biasB, real_t *restrict C, real_t *restrict C_bias, real_t *restrict D, real_t *restrict D_bias, bool start_with_ALS, bool reset_values, int_t seed, real_t *restrict glob_mean, int_t m, int_t n, int_t k, int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz, real_t *restrict Xfull, real_t *restrict weight, bool user_bias, bool item_bias, bool add_intercepts, real_t lam, real_t *restrict lam_unique, real_t *restrict U, int_t p, real_t *restrict II, int_t q, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, int_t n_corr_pairs, size_t maxiter, int nthreads, bool prefer_onepass, bool verbose, int_t print_every, bool handle_interrupt, int_t *restrict niter, int_t *restrict nfev, real_t *restrict Am, real_t *restrict Bm ); CMFREC_EXPORTABLE int_t factors_content_based_single ( real_t *restrict a_vec, int_t k, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t 
nnz_u_vec, real_t *restrict C, real_t *restrict C_bias ); CMFREC_EXPORTABLE int_t factors_content_based_mutliple ( real_t *restrict Am, int_t m_new, int_t k, real_t *restrict C, real_t *restrict C_bias, real_t *restrict U, int_t p, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, int nthreads ); CMFREC_EXPORTABLE int_t topN_old_content_based ( real_t *restrict a_vec, real_t a_bias, real_t *restrict Am, real_t *restrict biasA, int_t row_index, real_t *restrict Bm, real_t *restrict biasB, real_t glob_mean, int_t k, int_t *restrict include_ix, int_t n_include, int_t *restrict exclude_ix, int_t n_exclude, int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int_t n, int nthreads ); CMFREC_EXPORTABLE int_t topN_new_content_based ( /* inputs for the factors */ int_t k, int_t n_new, real_t *restrict u_vec, int_t p, real_t *restrict u_vec_sp, int_t u_vec_ixB[], size_t nnz_u_vec, real_t *restrict II, int_t q, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, size_t I_csr_p[], int_t I_csr_i[], real_t *restrict I_csr, real_t *restrict C, real_t *restrict C_bias, real_t *restrict D, real_t *restrict D_bias, real_t glob_mean, /* inputs for topN */ int_t *restrict outp_ix, real_t *restrict outp_score, int_t n_top, int nthreads ); CMFREC_EXPORTABLE int_t predict_X_old_content_based ( real_t *restrict predicted, size_t n_predict, int_t m_new, int_t k, int_t row[], /* <- optional */ int_t col[], int_t m_orig, int_t n_orig, real_t *restrict U, int_t p, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict C, real_t *restrict C_bias, real_t *restrict Bm, real_t *restrict biasB, real_t glob_mean, int nthreads ); CMFREC_EXPORTABLE int_t predict_X_new_content_based ( real_t *restrict predicted, size_t n_predict, int_t m_new, int_t n_new, int_t k, int_t row[], int_t col[], /* <- optional */ 
real_t *restrict U, int_t p, int_t U_row[], int_t U_col[], real_t *restrict U_sp, size_t nnz_U, size_t U_csr_p[], int_t U_csr_i[], real_t *restrict U_csr, real_t *restrict II, int_t q, int_t I_row[], int_t I_col[], real_t *restrict I_sp, size_t nnz_I, size_t I_csr_p[], int_t I_csr_i[], real_t *restrict I_csr, real_t *restrict C, real_t *restrict C_bias, real_t *restrict D, real_t *restrict D_bias, real_t glob_mean, int nthreads ); #ifdef __cplusplus } #endif
/* ==== hmacSHA512_fmt_plug.c ==== */
/* * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * Based on hmac-md5 by Bartavelle * * SIMD added Feb, 2015, JimF. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_hmacSHA512; extern struct fmt_main fmt_hmacSHA384; #elif FMT_REGISTERS_H john_register_one(&fmt_hmacSHA512); john_register_one(&fmt_hmacSHA384); #else #include "sha2.h" #include "arch.h" #include "misc.h" #include "common.h" #include "base64_convert.h" #include "formats.h" #include "aligned.h" #include "johnswap.h" #include "simd-intrinsics.h" #ifdef _OPENMP #include <omp.h> #ifdef SIMD_COEF_64 #ifndef OMP_SCALE #define OMP_SCALE 1024 // scaled on core i7-quad HT #endif #else #ifndef OMP_SCALE #define OMP_SCALE 512 // scaled K8-dual HT #endif #endif #endif #include "memdbg.h" #define FORMAT_LABEL "HMAC-SHA512" #define FORMAT_LABEL_384 "HMAC-SHA384" #define FORMAT_NAME "" #define ALGORITHM_NAME "password is key, SHA512 " SHA512_ALGORITHM_NAME #define ALGORITHM_NAME_384 "password is key, SHA384 " SHA512_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define PAD_SIZE 128 #define PAD_SIZE_W (PAD_SIZE/8) #define BINARY_SIZE (512/8) #define BINARY_SIZE_384 (384/8) #define BINARY_ALIGN 8 #ifndef SIMD_COEF_64 #define SALT_LENGTH 1023 #define SALT_ALIGN 1 #else #define SALT_LIMBS 2 /* 2 limbs, 239 bytes */ #define SALT_LENGTH (SALT_LIMBS * PAD_SIZE - 17) #define SALT_ALIGN MEM_ALIGN_SIMD #endif #define CIPHERTEXT_LENGTH (SALT_LENGTH + 1 + BINARY_SIZE * 2) #define CIPHERTEXT_LENGTH_384 (SALT_LENGTH + 1 + BINARY_SIZE_384 * 2) #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512) #define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i&127)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i&127)&7)) + 
index/SIMD_COEF_64 * PAD_SIZE * SIMD_COEF_64 ) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests tests[] = { {"what do ya want for nothing?#164b7a7bfcf819e2e395fbe73b56e0a387bd64222e831fd610270cd7ea2505549758bf75c05a994a6d034f65f8f0e6fdcaeab1a34d4a6b4b636e070a38bce737", "Jefe"}, {"Reference hashes are keys to success#73a5eff716d0147a440fdf5aff187c52deab8c4dc55073be3d5742e788a99fd6b53a5894725f0f88f3486b5bb63d2af930a0cf6267af572128273daf8eee4cfa", "The magnum"}, {"Beppe#Grillo#AB08C46822313481D548412A084F08C7CA3BBF8A98D901D14698759F4C36ADB07528348D56CAF4F6AF654E14FC102FF10DCF50794A82544426386C7BE238CEAF", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."}, {"hjXNSoAhq2YLm2vSFtc7BCJNUS9RNPcl#1c10f4d7237b034f27e7af81705e6cb0acecac395086e81e55a391a12b60b49e375b2de39c94f4989a50604754ffeea0b379ae1d4cc6b3550cd0a24a582ef351", "1"}, {"JkbHdY2Biype3gv2TpG2Wnv68OF7p6cl#a1f6e131e2fe1f728c5f2b8d0d8af9a6e202868ab9abef0e8f9126a712a4ae7f10533bbdedb710f6a521302c48a743caab1715aa85c4a57fbd51fde5e07945d9", "22"}, {"X4eOvWZw1b9L1NiND4vQxutubtrGhzNe#5a6002cedb05b97ce13393acab09767005a611dfc3e306305772c614ff4869077b3080f23694d3efc6d1998b4514fe8316389edb5f61dbcea8bd3b4d01595ae1", "333"}, {"VYG7HeRZLyie5jdzDRaqfd0yYX8PFstX#dd2b8b8a97c56af68fef5e73bf1eceec0c951084f97b66196b32758ed8b34a8d2f0e10663acac662e393fd42c0043e4cedf0d3c617ed43ba61b0297353fc2e2a", "4444"}, {"x8nIFPPTMJMEZLMSELpEub6bQjQzyjkq#fb92efe7d0abff004c8dc94c64356536df65dd42c323da1de4c583c255135b1a15002efc0b794683e7ac4ea7e7ae3813fb132b43c86a6951059a1574908987fb", "55555"}, {"Hr8KfafSSsEJfp5HZRLVAGQFrEPTDiSi#752e874177fc0f31149ebc699c32b2f7f600ad4d28f1fc27eb715a328100e6e67ff2845b20acd9ebc4befc7a629f1bd9a5b96abf981dcaba71317dcbb8cfdfba", "666666"}, {"UH0LvhZUihMMECAW0Ummw2OSgAOzV0i9#de3d4986007b1f45542f1d38d294ac69a0e23e2985103082a6ee134d4c786cfcb61d90be72388280e119e047bab32e68c6615d45d21895e5b8ef2b7eaf7258fd", "7777777"}, 
{"hX4OqAvhCjwEPwsi9I7SlIQbmlDb6LDh#cbf4fbb0721c9ec00af347d78046c314087efcbce47ef732e119433dc6f7fe3d2788e0a20d76bd2b1f9b199c9914eeaee0a51a2fb88cfbb7472b538e45b53711", "88888888"}, {"gOONPyTnQVKWMvh61x8Y1JGlDalKCBAE#9d4d34c76cb2a4cbecb8929be61dd4af5088a055bd338cd245311786c4119a5b526b72646626fff1cb4931eb0fe05d8a7648a66f0db1f2522b8af1cfc2ac8e74", "999999999"}, {"F3WBOJKUyVWbnqtGZ2ur8uW0nqIBpObK#6043dd6dd3dd96699db8351b0db762af27a5db06169ec6668e9f464fcc3fdf1d7deafaccb67e5ef7f5ee96b2a5efad33a8af20eb19fe60d8b20e7994c76a0610", "0000000000"}, {"pfZzfOSVpQvuILYEIAeCT8Xnj7eQnR2w#ff80da7bbcdb11fd8bb282a80603ed34847d897701fd547d06f4438072ecd43058a3b7c0b3a296f7c5dbbf06beb3825d1eb7122f01ad78ef2afc5ab09c46ca45", "11111111111"}, /* mockup JWT hash */ {"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjEyMzQ1Njc4OTAsIm5hbWUiOiJKb2huIERvZSIsImFkbWluIjp0cnVlfQ.r7FDU+ahrbW0Wtsekh5UNqV2iyXGrQQaRZjdc8i733QIoTSIQM//FSGjP151C2ijvNUVo5syWOW+RpZc7khU1g", "magnum"}, {NULL} }; static struct fmt_tests tests_384[] = { {"what do ya want for nothing?#af45d2e376484031617f78d2b58a6b1b9c7ef464f5a01b47e42ec3736322445e8e2240ca5e69e2c78b3239ecfab21649", "Jefe"}, {"Beppe#Grillo#8361922C63506E53714F8A8491C6621A76CF0FD6DFEAD91BF59B420A23DFF2745C0A0D5E142D4F937E714EA8C228835B", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."}, /* mockup JWT hash */ {"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOjEyMzQ1Njc4OTAsIm5hbWUiOiJKb2huIERvZSIsImFkbWluIjp0cnVlfQ.WNzjJCdDCTV3hLfsRy//hny9VzlaZXHFvoKSJXB5/rbKkXwE1Jve/DUirW7r5ztm", "magnum"}, {NULL} }; #ifdef SIMD_COEF_64 static unsigned char *crypt_key; static unsigned char *ipad, *prep_ipad; static unsigned char *opad, *prep_opad; typedef struct cur_salt_t { unsigned char salt[SALT_LIMBS][PAD_SIZE * MAX_KEYS_PER_CRYPT]; int salt_len; } cur_salt_t; static cur_salt_t *cur_salt; static int bufsize; #define SALT_SIZE sizeof(cur_salt_t) #else static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)]; static 
unsigned char (*opad)[PAD_SIZE]; static unsigned char (*ipad)[PAD_SIZE]; static unsigned char cur_salt[SALT_LENGTH+1]; static SHA512_CTX *ipad_ctx; static SHA512_CTX *opad_ctx; #define SALT_SIZE sizeof(cur_salt) #endif static char (*saved_plain)[PLAINTEXT_LENGTH + 1]; static int new_keys; #ifdef SIMD_COEF_64 static void clear_keys(void) { memset(ipad, 0x36, bufsize); memset(opad, 0x5C, bufsize); } #endif static void init(struct fmt_main *self, const int B_LEN) { #ifdef SIMD_COEF_64 int i; #endif #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif #ifdef SIMD_COEF_64 bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * PAD_SIZE; crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD); ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD); opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD); prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt, BINARY_SIZE, MEM_ALIGN_SIMD); prep_opad = mem_calloc_align(self->params.max_keys_per_crypt, BINARY_SIZE, MEM_ALIGN_SIMD); for (i = 0; i < self->params.max_keys_per_crypt; ++i) { crypt_key[GETPOS(B_LEN, i)] = 0x80; ((uint64_t*)crypt_key)[15 * SIMD_COEF_64 + (i&(SIMD_COEF_64-1)) + (i/SIMD_COEF_64) * PAD_SIZE_W * SIMD_COEF_64] = (B_LEN + PAD_SIZE) << 3; } clear_keys(); #else crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); ipad = mem_calloc(sizeof(*ipad), self->params.max_keys_per_crypt); opad = mem_calloc(sizeof(*opad), self->params.max_keys_per_crypt); ipad_ctx = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*opad_ctx), 8); opad_ctx = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*opad_ctx), 8); #endif saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); } static void init_512(struct fmt_main *self) { init(self, BINARY_SIZE); } static void init_384(struct fmt_main *self) { init(self, BINARY_SIZE_384); } static void done(void) { 
MEM_FREE(saved_plain); #ifdef SIMD_COEF_64 MEM_FREE(prep_opad); MEM_FREE(prep_ipad); #else MEM_FREE(opad_ctx); MEM_FREE(ipad_ctx); #endif MEM_FREE(opad); MEM_FREE(ipad); MEM_FREE(crypt_key); } static char *split(char *ciphertext, int index, struct fmt_main *self, const int B_LEN, const int CT_LEN) { static char out[CIPHERTEXT_LENGTH + 1]; if (strstr(ciphertext, "$SOURCE_HASH$")) return ciphertext; if (!strchr(ciphertext, '#') && strchr(ciphertext, '.') && strchr(ciphertext, '.') != strrchr(ciphertext, '.')) { // Treat this like a JWT hash. Convert into 'normal' hmac-sha512 format. char buf[BINARY_SIZE * 2 + 1], tmp[CIPHERTEXT_LENGTH + 1], *cpi; strnzcpy(tmp, ciphertext, sizeof(tmp)); cpi = strchr(tmp, '.'); cpi = strchr(&cpi[1], '.'); if (cpi - tmp + B_LEN * 2 + 1 > CT_LEN) return ciphertext; *cpi++ = 0; memset(buf, 0, sizeof(buf)); base64_convert(cpi, e_b64_mime, strlen(cpi), buf, e_b64_hex, sizeof(buf), flg_Base64_NO_FLAGS, 0); if (strlen(buf) != B_LEN * 2) return ciphertext; sprintf(out, "%s#%s", tmp, buf); } else strnzcpy(out, ciphertext, sizeof(out)); strlwr(strrchr(out, '#')); return out; } static char *split_512(char *ciphertext, int index, struct fmt_main *self) { return split(ciphertext, index, self, BINARY_SIZE, CIPHERTEXT_LENGTH); } static char *split_384(char *ciphertext, int index, struct fmt_main *self) { return split(ciphertext, index, self, BINARY_SIZE_384, CIPHERTEXT_LENGTH_384); } static int valid(char *ciphertext, struct fmt_main *self, const int B_LEN, const int CT_LEN) { int pos, i; char *p; p = strrchr(ciphertext, '#'); // allow # in salt if (!p && strchr(ciphertext, '.') && strchr(ciphertext, '.') != strrchr(ciphertext, '.')) { if (strlen(ciphertext) > CT_LEN) return 0; ciphertext = split(ciphertext, 0, self, B_LEN, CT_LEN); p = strrchr(ciphertext, '#'); } if (!p || p > &ciphertext[strlen(ciphertext)-1]) return 0; i = (int)(p - ciphertext); if (i > SALT_LENGTH) return 0; pos = i + 1; if (strlen(ciphertext + pos) != B_LEN * 2) return 0; for (i 
= pos; i < B_LEN * 2 + pos; i++) { if (!( (('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) || (('a' <= ciphertext[i])&&(ciphertext[i] <= 'f')) || (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F')))) return 0; } return 1; } static int valid_512(char *ciphertext, struct fmt_main *self) { return valid(ciphertext, self, BINARY_SIZE, CIPHERTEXT_LENGTH); } static int valid_384(char *ciphertext, struct fmt_main *self) { return valid(ciphertext, self, BINARY_SIZE_384, CIPHERTEXT_LENGTH_384); } static void set_salt(void *salt) { #ifdef SIMD_COEF_64 cur_salt = salt; #else strcpy((char*)cur_salt, (char*)salt); #endif } static MAYBE_INLINE void set_key(char *key, int index, const int B_LEN) { int len; #ifdef SIMD_COEF_64 uint64_t *ipadp = (uint64_t*)&ipad[GETPOS(7, index)]; uint64_t *opadp = (uint64_t*)&opad[GETPOS(7, index)]; const uint64_t *keyp = (uint64_t*)key; uint64_t temp; len = strlen(key); memcpy(saved_plain[index], key, len); saved_plain[index][len] = 0; if (len > PAD_SIZE) { unsigned char k0[BINARY_SIZE]; SHA512_CTX ctx; int i; if (B_LEN == BINARY_SIZE) { SHA512_Init(&ctx); SHA512_Update(&ctx, key, len); SHA512_Final(k0, &ctx); } else { SHA384_Init(&ctx); SHA384_Update(&ctx, key, len); SHA384_Final(k0, &ctx); } keyp = (uint64_t*)k0; for (i = 0; i < B_LEN / 8; i++, ipadp += SIMD_COEF_64, opadp += SIMD_COEF_64) { temp = JOHNSWAP64(*keyp++); *ipadp ^= temp; *opadp ^= temp; } } else while(((temp = JOHNSWAP64(*keyp++)) & 0xff00000000000000ULL)) { if (!(temp & 0x00ff000000000000ULL) || !(temp & 0x0000ff0000000000ULL)) { ((unsigned short*)ipadp)[3] ^= (unsigned short)(temp >> 48); ((unsigned short*)opadp)[3] ^= (unsigned short)(temp >> 48); break; } if (!(temp & 0x00ff00000000ULL) || !(temp & 0x0000ff000000ULL)) { ((uint32_t*)ipadp)[1] ^= (uint32_t)(temp >> 32); ((uint32_t*)opadp)[1] ^= (uint32_t)(temp >> 32); break; } if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00)) { ((uint32_t*)ipadp)[1] ^= (uint32_t)(temp >> 32); ((uint32_t*)opadp)[1] ^= (uint32_t)(temp >> 32); 
((unsigned short*)ipadp)[1] ^= (unsigned short)(temp >> 16); ((unsigned short*)opadp)[1] ^= (unsigned short)(temp >> 16); break; } *ipadp ^= temp; *opadp ^= temp; if (!(temp & 0xff)) break; ipadp += SIMD_COEF_64; opadp += SIMD_COEF_64; } #else int i; len = strlen(key); memcpy(saved_plain[index], key, len); saved_plain[index][len] = 0; memset(ipad[index], 0x36, PAD_SIZE); memset(opad[index], 0x5C, PAD_SIZE); if (len > PAD_SIZE) { SHA512_CTX ctx; unsigned char k0[BINARY_SIZE]; if (B_LEN == BINARY_SIZE) { SHA512_Init( &ctx ); SHA512_Update( &ctx, key, len); SHA512_Final( k0, &ctx); } else { SHA384_Init( &ctx ); SHA384_Update( &ctx, key, len); SHA384_Final( k0, &ctx); } len = B_LEN; for (i=0;i<len;i++) { ipad[index][i] ^= k0[i]; opad[index][i] ^= k0[i]; } } else for (i=0;i<len;i++) { ipad[index][i] ^= key[i]; opad[index][i] ^= key[i]; } #endif new_keys = 1; } static void set_key_512(char *key, int index) { set_key(key, index, BINARY_SIZE); } static void set_key_384(char *key, int index) { set_key(key, index, BINARY_SIZE_384); } static char *get_key(int index) { return saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_64 unsigned int index; for (index = 0; index < count; index++) { // NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_64) if (((uint64_t*)binary)[0] == ((uint64_t*)crypt_key)[(index&(SIMD_COEF_64-1))+index/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64]) return 1; } return 0; #else int index = 0; #if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1) for (; index < count; index++) #endif if (((uint32_t*)binary)[0] == crypt_key[index][0]) return 1; return 0; #endif } static int cmp_one(void *binary, int index, int B_LEN) { #ifdef SIMD_COEF_64 int i; for (i = 0; i < (B_LEN/8); i++) // NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_64) if (((uint64_t*)binary)[i] != ((uint64_t*)crypt_key)[i * SIMD_COEF_64 + (index & (SIMD_COEF_64-1)) + (index/SIMD_COEF_64) * PAD_SIZE_W * SIMD_COEF_64]) return 0; return 1; #else return 
!memcmp(binary, crypt_key[index], B_LEN); #endif } static int cmp_one_512(void *binary, int index) { return cmp_one(binary, index, BINARY_SIZE); } static int cmp_one_384(void *binary, int index) { return cmp_one(binary, index, BINARY_SIZE_384); } static int cmp_exact(char *source, int index) { return (1); } static int crypt_all(int *pcount, struct db_salt *salt, #ifdef SIMD_COEF_64 const unsigned EX_FLAGS #else const int B_LEN #endif ) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { #ifdef SIMD_COEF_64 unsigned int i; if (new_keys) { SIMDSHA512body(&ipad[index * PAD_SIZE], (uint64_t*)&prep_ipad[index * BINARY_SIZE], NULL, SSEi_MIXED_IN|EX_FLAGS); SIMDSHA512body(&opad[index * PAD_SIZE], (uint64_t*)&prep_opad[index * BINARY_SIZE], NULL, SSEi_MIXED_IN|EX_FLAGS); } SIMDSHA512body(cur_salt->salt[0], (uint64_t*)&crypt_key[index * PAD_SIZE], (uint64_t*)&prep_ipad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS); for (i = 1; i <= (cur_salt->salt_len + 16) / PAD_SIZE; i++) SIMDSHA512body(cur_salt->salt[i], (uint64_t*)&crypt_key[index * PAD_SIZE], (uint64_t*)&crypt_key[index * PAD_SIZE], SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS); if (EX_FLAGS) { // NOTE, SSESHA384 will output 64 bytes. We need the first 48 (plus the 0x80 padding). // so we are forced to 'clean' this crap up, before using the crypt as the input. 
uint64_t *pclear = (uint64_t*)&crypt_key[index/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64*8]; for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) { pclear[48/8*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64] = 0x8000000000000000ULL; pclear[48/8*SIMD_COEF_64+(i&(SIMD_COEF_64-1))+i/SIMD_COEF_64*PAD_SIZE_W*SIMD_COEF_64+SIMD_COEF_64] = 0; } } SIMDSHA512body(&crypt_key[index * PAD_SIZE], (uint64_t*)&crypt_key[index * PAD_SIZE], (uint64_t*)&prep_opad[index * BINARY_SIZE], SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT|EX_FLAGS); #else SHA512_CTX ctx; // Note, for oSSL, we really only need SHA512_Init and SHA384_Init. From that point // on, SHA512_Update/SHA512_Final can be used. Also, jtr internal sha2.c file works // like that. BUT I am not sure every hash engine works that way, so we are keeping // the 'full' block. if (B_LEN == BINARY_SIZE) { if (new_keys) { SHA512_Init(&ipad_ctx[index]); SHA512_Update(&ipad_ctx[index], ipad[index], PAD_SIZE); SHA512_Init(&opad_ctx[index]); SHA512_Update(&opad_ctx[index], opad[index], PAD_SIZE); } memcpy(&ctx, &ipad_ctx[index], sizeof(ctx)); SHA512_Update( &ctx, cur_salt, strlen( (char*) cur_salt) ); SHA512_Final( (unsigned char*) crypt_key[index], &ctx); memcpy(&ctx, &opad_ctx[index], sizeof(ctx)); SHA512_Update( &ctx, crypt_key[index], B_LEN); SHA512_Final( (unsigned char*) crypt_key[index], &ctx); } else { if (new_keys) { SHA384_Init(&ipad_ctx[index]); SHA384_Update(&ipad_ctx[index], ipad[index], PAD_SIZE); SHA384_Init(&opad_ctx[index]); SHA384_Update(&opad_ctx[index], opad[index], PAD_SIZE); } memcpy(&ctx, &ipad_ctx[index], sizeof(ctx)); SHA384_Update( &ctx, cur_salt, strlen( (char*) cur_salt) ); SHA384_Final( (unsigned char*) crypt_key[index], &ctx); memcpy(&ctx, &opad_ctx[index], sizeof(ctx)); SHA384_Update( &ctx, crypt_key[index], B_LEN); SHA384_Final( (unsigned char*) crypt_key[index], &ctx); } #endif } new_keys = 0; return count; } static int crypt_all_512(int *pcount, struct db_salt *salt) { #ifdef SIMD_COEF_64 
return crypt_all(pcount, salt, 0); #else return crypt_all(pcount, salt, BINARY_SIZE); #endif } static int crypt_all_384(int *pcount, struct db_salt *salt) { #ifdef SIMD_COEF_64 return crypt_all(pcount, salt, SSEi_CRYPT_SHA384); #else return crypt_all(pcount, salt, BINARY_SIZE_384); #endif } static void *get_binary(char *ciphertext, const int B_LEN) { JTR_ALIGN(BINARY_ALIGN) static unsigned char realcipher[BINARY_SIZE]; int i,pos; for (i=strlen(ciphertext);ciphertext[i]!='#';i--); // allow # in salt pos=i+1; for (i=0;i<B_LEN;i++) realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+pos])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1+pos])]; #ifdef SIMD_COEF_64 alter_endianity_w64(realcipher, B_LEN/8); #endif return (void*)realcipher; } static void *get_binary_512(char *ciphertext) { return get_binary(ciphertext, BINARY_SIZE); } static void *get_binary_384(char *ciphertext) { return get_binary(ciphertext, BINARY_SIZE_384); } static void *get_salt(char *ciphertext) { static unsigned char salt[SALT_LENGTH+1]; int len; #ifdef SIMD_COEF_64 unsigned int i = 0; static JTR_ALIGN(MEM_ALIGN_SIMD) cur_salt_t cur_salt; int salt_len = 0; #endif // allow # in salt len = strrchr(ciphertext, '#') - ciphertext; memset(salt, 0, sizeof(salt)); memcpy(salt, ciphertext, len); #ifdef SIMD_COEF_64 memset(&cur_salt, 0, sizeof(cur_salt)); while(((unsigned char*)salt)[salt_len]) { for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = ((unsigned char*)salt)[salt_len]; ++salt_len; } cur_salt.salt_len = salt_len; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = 0x80; ((uint64_t*)cur_salt.salt[(salt_len+16) / PAD_SIZE])[15 * SIMD_COEF_64 + (i & (SIMD_COEF_64-1)) + (i/SIMD_COEF_64) * PAD_SIZE_W * SIMD_COEF_64] = (salt_len + PAD_SIZE) << 3; } return &cur_salt; #else return salt; #endif } struct fmt_main fmt_hmacSHA512 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, 
PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { NULL }, tests }, { init_512, done, fmt_default_reset, fmt_default_prepare, valid_512, split_512, get_binary_512, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key_512, get_key, #ifdef SIMD_COEF_64 clear_keys, #else fmt_default_clear_keys, #endif crypt_all_512, { fmt_default_get_hash }, cmp_all, cmp_one_512, cmp_exact } }; struct fmt_main fmt_hmacSHA384 = { { FORMAT_LABEL_384, FORMAT_NAME, ALGORITHM_NAME_384, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE_384, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { NULL }, tests_384 }, { init_384, done, fmt_default_reset, fmt_default_prepare, valid_384, split_384, get_binary_384, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key_384, get_key, #ifdef SIMD_COEF_64 clear_keys, #else fmt_default_clear_keys, #endif crypt_all_384, { fmt_default_get_hash }, cmp_all, cmp_one_384, cmp_exact } }; #endif /* plugin stanza */
mat_mul.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>

void readMatrix(double** m, FILE* file, int rows, int cols);
void printMatrix(double** m, FILE* file, int rows, int cols);
void matrixMul(double** m1, double** m2, double** m3, int m, int n, int p);

/*
 * Reads a rows x cols matrix from 'file' into 'm'.
 * 'm' must already point to an array of 'rows' row pointers; each row is
 * allocated here (caller frees).  The dimension header is expected to have
 * been consumed by the caller; elements are read whitespace-separated in
 * row-major order with fscanf.
 */
void readMatrix(double** m, FILE* file, int rows, int cols)
{
	int i, j;

	for (i = 0; i < rows; i++) {
		m[i] = (double*)malloc(cols * sizeof(double));
	}
	for (i = 0; i < rows; i++) {
		for (j = 0; j < cols; j++) {
			/* fscanf was previously unchecked; a short/malformed file
			   left elements uninitialized.  Zero them instead. */
			if (fscanf(file, "%lf", &m[i][j]) != 1) {
				m[i][j] = 0.0;
			}
		}
	}
}

/*
 * The opposite operation of readMatrix: stores a rows x cols matrix into
 * 'file', element by element, one row per output line.
 */
void printMatrix(double** m, FILE* file, int rows, int cols)
{
	int i, j;

	for (i = 0; i < rows; i++) {
		for (j = 0; j < cols; j++) {
			fprintf(file, "%lf ", m[i][j]);
		}
		fprintf(file, "\n");
	}
}

/*
 * Computes m3 = m1 * m2 where m1 is (m x n), m2 is (n x p), m3 is (m x p).
 * Rows of m3 are allocated and zeroed here; caller frees.
 *
 * BUGFIX: the inner accumulation previously ran k over [0, p) instead of
 * the shared dimension [0, n).  That was correct only when n == p, read
 * m1[i][k] out of bounds when p > n, and truncated the dot product when
 * p < n.  The shared dimension is now an explicit parameter 'n'
 * (the prototype above and the only caller, main, are updated in step).
 *
 * The iterations of the outer loop are shared among OpenMP threads: each
 * thread computes whole rows of m3, so no two threads ever write the same
 * element and no synchronization is needed.
 */
void matrixMul(double** m1, double** m2, double** m3, int m, int n, int p)
{
	int i, j, k;

	for (i = 0; i < m; i++) {
		m3[i] = (double*)malloc(p * sizeof(double));
		memset(m3[i], 0, p * sizeof(double));
	}
#pragma omp parallel shared(m1, m2, m3) private(i, j, k)
	{
#pragma omp for schedule(dynamic)
		for (i = 0; i < m; i++) {
			for (j = 0; j < p; j++) {
				for (k = 0; k < n; k++) {
					m3[i][j] += m1[i][k] * m2[k][j];
				}
			}
		}
	}
}

/*
 * Usage: prog mat1.txt mat2.txt
 * Each input file starts with "rows cols" followed by the elements.
 * Writes the product to result.txt and prints the wall-clock time spent
 * in matrixMul.
 */
int main(int argc, char* argv[])
{
	if (argc != 3) { /* 1- exe name, 2- mat1.txt, 3- mat2.txt */
		printf("Parameter error.\n");
		exit(1);
	}

	FILE *mat1, *mat2, *resultFile;
	double t;
	int m, n1, n2, p, i;

	mat1 = fopen(argv[1], "r");
	mat2 = fopen(argv[2], "r");
	/* fopen was previously unchecked: a missing file crashed in fscanf. */
	if (mat1 == NULL || mat2 == NULL) {
		printf("File open error.\n");
		if (mat1 != NULL) fclose(mat1);
		if (mat2 != NULL) fclose(mat2);
		exit(1);
	}
	if (fscanf(mat1, "%d %d", &m, &n1) != 2 ||
	    fscanf(mat2, "%d %d", &n2, &p) != 2 ||
	    m <= 0 || n1 <= 0 || n2 <= 0 || p <= 0) {
		printf("Header read error.\n");
		fclose(mat1);
		fclose(mat2);
		exit(1);
	}

	/* Multiplication is permitted only if m1 is m x n and m2 is n x p:
	   m1's column count must equal m2's row count. */
	if (n1 != n2) {
		printf("It is not possible to do matrix multiplication. Check matrix number of rows and cols.\n");
		fclose(mat1);
		fclose(mat2);
		exit(1);
	}

	double **m1 = (double **)malloc(m * sizeof(double*));
	double **m2 = (double **)malloc(n2 * sizeof(double*));
	double **m3 = (double **)malloc(m * sizeof(double*));

	readMatrix(m1, mat1, m, n1);
	readMatrix(m2, mat2, n2, p);

	t = omp_get_wtime();
	matrixMul(m1, m2, m3, m, n1, p);
	t = omp_get_wtime() - t; /* total wall-clock time spent in matrixMul */

	resultFile = fopen("result.txt", "w");
	if (resultFile == NULL) {
		printf("File open error.\n");
		fclose(mat1);
		fclose(mat2);
		exit(1);
	}
	printMatrix(m3, resultFile, m, p);

	printf("Elapsed time: %.5f seconds\n", t);

	fclose(mat1);
	fclose(mat2);
	fclose(resultFile);

	for (i = 0; i < m; i++) {
		free(m1[i]);
		free(m3[i]);
	}
	for (i = 0; i < n2; i++) {
		free(m2[i]);
	}
	free(m1);
	free(m2);
	free(m3);

	return 0;
}
isotope.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

/* Isotope (mass-variance) phonon scattering strengths.
 *
 * Both functions fill gamma[0..num_band0) for the bands listed in
 * band_indices at the q-point 'grid_point', by summing squared eigenvector
 * overlaps against every band of every other grid point, weighted by the
 * per-atom mass variances and by an energy-conservation weight (a Gaussian
 * of width 'sigma' in the first function, precomputed integration weights
 * in the second).
 *
 * NOTE(review): the eigenvector layout assumed by the indexing below is
 * eigenvectors[gp * num_band * num_band + component * num_band + band]
 * with 3 Cartesian components per atom (num_band == 3 * num_atoms) —
 * confirm against the callers; it is inferred from the index arithmetic,
 * not stated anywhere in this file.
 */
#include <stdlib.h>

#include "phonoc_const.h"
#include "phonoc_utils.h"
#include "isotope.h"
#include "lapack_wrapper.h"

/* Smearing (Gaussian) variant.
 *
 * gamma            : [num_band0] output; overwritten, not accumulated.
 * grid_point       : q-point whose bands are being evaluated.
 * mass_variances   : per-atom mass-variance parameters (indexed by l below).
 * frequencies      : [num_grid_points][num_band] phonon frequencies.
 * eigenvectors     : complex eigenvectors, layout as noted above.
 * band_indices     : [num_band0] bands of 'grid_point' to evaluate.
 * sigma            : smearing width handed to phonoc_gaussian.
 * cutoff_frequency : bands below this frequency are skipped entirely.
 */
void iso_get_isotope_scattering_strength(
    double *gamma, const long grid_point, const double *mass_variances,
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long num_grid_points, const long *band_indices, const long num_band,
    const long num_band0, const double sigma, const double cutoff_frequency) {
    long i, j, k, l, m;
    double *e0_r, *e0_i, e1_r, e1_i, a, b, f, *f0, dist, sum_g, sum_g_k;

    /* Cache the reference frequencies (f0) and the real/imag parts of the
       reference eigenvectors (e0_r/e0_i) for the selected bands, so the hot
       loops below avoid repeated strided complex loads. */
    e0_r = (double *)malloc(sizeof(double) * num_band * num_band0);
    e0_i = (double *)malloc(sizeof(double) * num_band * num_band0);
    f0 = (double *)malloc(sizeof(double) * num_band0);

    for (i = 0; i < num_band0; i++) {
        f0[i] = frequencies[grid_point * num_band + band_indices[i]];
        for (j = 0; j < num_band; j++) {
            e0_r[i * num_band + j] = lapack_complex_double_real(
                eigenvectors[grid_point * num_band * num_band + j * num_band +
                             band_indices[i]]);
            e0_i[i * num_band + j] = lapack_complex_double_imag(
                eigenvectors[grid_point * num_band * num_band + j * num_band +
                             band_indices[i]]);
        }
    }

    for (i = 0; i < num_band0; i++) {
        gamma[i] = 0;
    }

    for (i = 0; i < num_band0; i++) { /* band index0 */
        if (f0[i] < cutoff_frequency) {
            continue;
        }
        sum_g = 0;
        /* Grid points are independent; each thread accumulates its own
           sum_g_k and the partial sums are combined by the reduction. */
#ifdef PHPYOPENMP
#pragma omp parallel for private(k, l, m, f, e1_r, e1_i, a, b, dist, sum_g_k) reduction(+ : sum_g)
#endif
        for (j = 0; j < num_grid_points; j++) {
            sum_g_k = 0;
            for (k = 0; k < num_band; k++) { /* band index */
                f = frequencies[j * num_band + k];
                if (f < cutoff_frequency) {
                    continue;
                }
                /* Energy-conservation weight; presumably a normalized
                   Gaussian of width sigma — see phonoc_utils. */
                dist = phonoc_gaussian(f - f0[i], sigma);
                for (l = 0; l < num_band / 3; l++) { /* elements */
                    /* (a, b) accumulate the complex inner product of the
                       two eigenvectors over atom l's 3 components:
                       a + ib = <e0_atom | e1_atom>. */
                    a = 0;
                    b = 0;
                    for (m = 0; m < 3; m++) {
                        e1_r = lapack_complex_double_real(
                            eigenvectors[j * num_band * num_band +
                                         (l * 3 + m) * num_band + k]);
                        e1_i = lapack_complex_double_imag(
                            eigenvectors[j * num_band * num_band +
                                         (l * 3 + m) * num_band + k]);
                        a += (e0_r[i * num_band + l * 3 + m] * e1_r +
                              e0_i[i * num_band + l * 3 + m] * e1_i);
                        b += (e0_i[i * num_band + l * 3 + m] * e1_r -
                              e0_r[i * num_band + l * 3 + m] * e1_i);
                    }
                    /* |<e0|e1>|^2 weighted by mass variance and smearing. */
                    sum_g_k += (a * a + b * b) * mass_variances[l] * dist;
                }
            }
            sum_g += sum_g_k;
        }
        gamma[i] = sum_g;
    }

    for (i = 0; i < num_band0; i++) {
        /* Frequency unit to ang-freq: *(2pi)**2/(2pi) */
        /* Ang-freq to freq unit (for lifetime): /2pi */
        /* gamma = 1/2t */
        gamma[i] *= M_2PI / 4 * f0[i] * f0[i] / 2;
    }

    free(f0);
    f0 = NULL;
    free(e0_r);
    e0_r = NULL;
    free(e0_i);
    e0_i = NULL;
}

/* Tetrahedron-method variant: identical overlap sums, but the Gaussian is
 * replaced by caller-supplied integration_weights and only the irreducible
 * grid points are visited, each multiplied by its multiplicity weights[gp].
 *
 * NOTE(review): gamma_ij is indexed by gp = ir_grid_points[i]; this assumes
 * every ir_grid_points value is < num_grid_points — verify at the caller.
 */
void iso_get_thm_isotope_scattering_strength(
    double *gamma, const long grid_point, const long *ir_grid_points,
    const long *weights, const double *mass_variances,
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long num_grid_points, const long *band_indices, const long num_band,
    const long num_band0, const double *integration_weights,
    const double cutoff_frequency) {
    long i, j, k, l, m, gp;
    double *e0_r, *e0_i, *f0, *gamma_ij;
    double e1_r, e1_i, a, b, f, dist, sum_g_k;

    /* Same reference-band caching as the smearing variant above. */
    e0_r = (double *)malloc(sizeof(double) * num_band * num_band0);
    e0_i = (double *)malloc(sizeof(double) * num_band * num_band0);
    f0 = (double *)malloc(sizeof(double) * num_band0);

    for (i = 0; i < num_band0; i++) {
        f0[i] = frequencies[grid_point * num_band + band_indices[i]];
        for (j = 0; j < num_band; j++) {
            e0_r[i * num_band + j] = lapack_complex_double_real(
                eigenvectors[grid_point * num_band * num_band + j * num_band +
                             band_indices[i]]);
            e0_i[i * num_band + j] = lapack_complex_double_imag(
                eigenvectors[grid_point * num_band * num_band + j * num_band +
                             band_indices[i]]);
        }
    }

    /* Per-(grid point, band0) partial results; written by disjoint threads
       below (one gp per iteration), then reduced serially afterwards. */
    gamma_ij = (double *)malloc(sizeof(double) * num_grid_points * num_band0);

#ifdef PHPYOPENMP
#pragma omp parallel for
#endif
    for (i = 0; i < num_grid_points * num_band0; i++) {
        gamma_ij[i] = 0;
    }

#ifdef PHPYOPENMP
#pragma omp parallel for private(j, k, l, m, f, gp, e1_r, e1_i, a, b, dist, sum_g_k)
#endif
    for (i = 0; i < num_grid_points; i++) {
        gp = ir_grid_points[i];
        for (j = 0; j < num_band0; j++) { /* band index0 */
            if (f0[j] < cutoff_frequency) {
                continue;
            }
            sum_g_k = 0;
            for (k = 0; k < num_band; k++) { /* band index */
                f = frequencies[gp * num_band + k];
                if (f < cutoff_frequency) {
                    continue;
                }
                /* Precomputed energy-conservation weight replaces the
                   Gaussian used in the smearing variant. */
                dist = integration_weights[gp * num_band0 * num_band +
                                           j * num_band + k];
                for (l = 0; l < num_band / 3; l++) { /* elements */
                    /* Complex inner product over atom l's 3 components. */
                    a = 0;
                    b = 0;
                    for (m = 0; m < 3; m++) {
                        e1_r = lapack_complex_double_real(
                            eigenvectors[gp * num_band * num_band +
                                         (l * 3 + m) * num_band + k]);
                        e1_i = lapack_complex_double_imag(
                            eigenvectors[gp * num_band * num_band +
                                         (l * 3 + m) * num_band + k]);
                        a += (e0_r[j * num_band + l * 3 + m] * e1_r +
                              e0_i[j * num_band + l * 3 + m] * e1_i);
                        b += (e0_i[j * num_band + l * 3 + m] * e1_r -
                              e0_r[j * num_band + l * 3 + m] * e1_i);
                    }
                    sum_g_k += (a * a + b * b) * mass_variances[l] * dist;
                }
            }
            /* weights[gp] is the multiplicity of this irreducible point. */
            gamma_ij[gp * num_band0 + j] = sum_g_k * weights[gp];
        }
    }

    for (i = 0; i < num_band0; i++) {
        gamma[i] = 0;
    }

    /* Serial reduction over grid points (kept outside the parallel region
       so gamma[] needs no atomics). */
    for (i = 0; i < num_grid_points; i++) {
        gp = ir_grid_points[i];
        for (j = 0; j < num_band0; j++) {
            gamma[j] += gamma_ij[gp * num_band0 + j];
        }
    }

    for (i = 0; i < num_band0; i++) {
        /* Frequency unit to ang-freq: *(2pi)**2/(2pi) */
        /* Ang-freq to freq unit (for lifetime): /2pi */
        /* gamma = 1/2t */
        gamma[i] *= M_2PI / 4 * f0[i] * f0[i] / 2;
    }

    free(gamma_ij);
    gamma_ij = NULL;
    free(f0);
    f0 = NULL;
    free(e0_r);
    e0_r = NULL;
    free(e0_i);
    e0_i = NULL;
}
wyhash.h
/* Author: Wang Yi <godspeed_china@yeah.net> */
#ifndef wyhash_version_1
#define wyhash_version_1
#include <stdint.h>
#include <string.h>
#include <math.h>
#if defined(_MSC_VER) && defined(_M_X64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif

/* wyhash v1: fast 64-bit non-cryptographic hashing plus a small PRNG.
 *
 * FIX: the secret constants are now 'static'.  In C, a file-scope 'const'
 * object has external linkage, so including this header from more than one
 * translation unit previously caused duplicate-symbol link errors. */
static const uint64_t _wyp0=0xa0761d6478bd642full, _wyp1=0xe7037ed1a0b428dbull, _wyp2=0x8ebc6af09c88c6e3ull;
static const uint64_t _wyp3=0x589965cc75374cc3ull, _wyp4=0x1d8e4e27c47d124full, _wyp5=0xeb44accab455d165ull;

/* 64x64 -> 128-bit multiply, folded to 64 bits as (hi ^ lo). */
static inline uint64_t _wymum(uint64_t A, uint64_t B){
#ifdef __SIZEOF_INT128__
  __uint128_t r=A; r*=B;
  return (r>>64)^r;
#elif defined(_MSC_VER) && defined(_M_X64)
  A=_umul128(A, B, &B);
  return A^B;
#else
  /* Portable fallback: schoolbook multiply on 32-bit halves with carry. */
  uint64_t ha=A>>32, hb=B>>32, la=(uint32_t)A, lb=(uint32_t)B, hi, lo;
  uint64_t rh=ha*hb, rm0=ha*lb, rm1=hb*la, rl=la*lb, t=rl+(rm0<<32), c=t<rl;
  lo=t+(rm1<<32); c+=lo<t; hi=rh+(rm0>>32)+(rm1>>32)+c;
  return hi^lo;
#endif
}

/* Unaligned loads of 1/2/4/8 bytes; memcpy avoids alignment/aliasing UB. */
static inline uint64_t _wyr08(const uint8_t *p){ uint8_t  v; memcpy(&v, p, 1); return v; }
static inline uint64_t _wyr16(const uint8_t *p){ uint16_t v; memcpy(&v, p, 2); return v; }
static inline uint64_t _wyr32(const uint8_t *p){ uint32_t v; memcpy(&v, p, 4); return v; }
static inline uint64_t _wyr64(const uint8_t *p){ uint64_t v; memcpy(&v, p, 8); return v; }
/* 8-byte read composed of two 32-bit reads; used for the tail cases. */
static inline uint64_t __wyr64(const uint8_t *p){ return (_wyr32(p)<<32)|_wyr32(p+4); }

/* Hash 'len' bytes at 'key' with the given 'seed'. */
static inline uint64_t wyhash(const void* key, uint64_t len, uint64_t seed){
  const uint8_t *p=(const uint8_t*)key;
  uint64_t i;
  /* Bulk loop: mix 32 bytes per iteration. */
  for(i=0; i+32<=len; i+=32, p+=32)
    seed=_wymum(seed^_wyp0, _wymum(_wyr64(p)^_wyp1,_wyr64(p+8)^_wyp2)^_wymum(_wyr64(p+16)^_wyp3,_wyr64(p+24)^_wyp4));
  seed^=_wyp0;
  /* Tail: one specialized arm per residual length (0 needs no mixing). */
  switch(len&31){
  case 1:  seed=_wymum(seed,_wyr08(p)^_wyp1); break;
  case 2:  seed=_wymum(seed,_wyr16(p)^_wyp1); break;
  case 3:  seed=_wymum(seed,((_wyr16(p)<<8)|_wyr08(p+2))^_wyp1); break;
  case 4:  seed=_wymum(seed,_wyr32(p)^_wyp1); break;
  case 5:  seed=_wymum(seed,((_wyr32(p)<<8)|_wyr08(p+4))^_wyp1); break;
  case 6:  seed=_wymum(seed,((_wyr32(p)<<16)|_wyr16(p+4))^_wyp1); break;
  case 7:  seed=_wymum(seed,((_wyr32(p)<<24)|(_wyr16(p+4)<<8)|_wyr08(p+6))^_wyp1); break;
  case 8:  seed=_wymum(seed,__wyr64(p)^_wyp1); break;
  case 9:  seed=_wymum(__wyr64(p)^seed,_wyr08(p+8)^_wyp2); break;
  case 10: seed=_wymum(__wyr64(p)^seed,_wyr16(p+8)^_wyp2); break;
  case 11: seed=_wymum(__wyr64(p)^seed,((_wyr16(p+8)<<8)|_wyr08(p+8+2))^_wyp2); break;
  case 12: seed=_wymum(__wyr64(p)^seed,_wyr32(p+8)^_wyp2); break;
  case 13: seed=_wymum(__wyr64(p)^seed,((_wyr32(p+8)<<8)|_wyr08(p+8+4))^_wyp2); break;
  case 14: seed=_wymum(__wyr64(p)^seed,((_wyr32(p+8)<<16)|_wyr16(p+8+4))^_wyp2); break;
  case 15: seed=_wymum(__wyr64(p)^seed,((_wyr32(p+8)<<24)|(_wyr16(p+8+4)<<8)|_wyr08(p+8+6))^_wyp2); break;
  case 16: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2); break;
  case 17: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,_wyr08(p+16)^_wyp3); break;
  case 18: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,_wyr16(p+16)^_wyp3); break;
  case 19: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr16(p+16)<<8)|_wyr08(p+16+2))^_wyp3); break;
  case 20: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,_wyr32(p+16)^_wyp3); break;
  case 21: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr32(p+16)<<8)|_wyr08(p+16+4))^_wyp3); break;
  case 22: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr32(p+16)<<16)|_wyr16(p+16+4))^_wyp3); break;
  case 23: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,((_wyr32(p+16)<<24)|(_wyr16(p+16+4)<<8)|_wyr08(p+16+6))^_wyp3); break;
  case 24: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(seed,__wyr64(p+16)^_wyp3); break;
  case 25: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,_wyr08(p+24)^_wyp4); break;
  case 26: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,_wyr16(p+24)^_wyp4); break;
  case 27: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr16(p+24)<<8)|_wyr08(p+24+2))^_wyp4); break;
  case 28: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,_wyr32(p+24)^_wyp4); break;
  case 29: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr32(p+24)<<8)|_wyr08(p+24+4))^_wyp4); break;
  case 30: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr32(p+24)<<16)|_wyr16(p+24+4))^_wyp4); break;
  case 31: seed=_wymum(__wyr64(p)^seed,__wyr64(p+8)^_wyp2)^_wymum(__wyr64(p+16)^seed,((_wyr32(p+24)<<24)|(_wyr16(p+24+4)<<8)|_wyr08(p+24+6))^_wyp4); break;
  }
  return _wymum(seed, len^_wyp5);
}

/* Hash two 64-bit words into one. */
static inline uint64_t wyhash64(uint64_t A, uint64_t B){ return _wymum(_wymum(A^_wyp0, B^_wyp1), _wyp2); }

/* Map a 64-bit word to a double uniformly distributed in [0, 1). */
static inline double wy2u01(uint64_t r){ const double _wynorm=1.0/(1ull<<52); return (r&0x000fffffffffffffull)*_wynorm; }

/* Map a 64-bit word to an approximately normal float (sum of 3 uniforms,
   shifted to mean 0). */
static inline float wy2gau(uint64_t r){ const float _wynorm1=1.0f/(1ull<<20); return ((r&0x1fffff)+((r>>21)&0x1fffff)+(r>>43))*_wynorm1-3.0f; }

/* PRNG step with caller-owned state: advances *seed, returns next output. */
static inline uint64_t wyrand(uint64_t *seed){ *seed+=_wyp0; return _wymum(*seed^_wyp1,*seed); }

/* Global-state PRNG.
 *
 * FIX: this function was previously also named 'wyrand', redeclaring the
 * pointer-based wyrand above with a different signature — a hard compile
 * error in C, which has no function overloading.  It is renamed
 * wyrand_global; wysrand() still seeds it.  The OpenMP atomic capture
 * makes the state update thread-safe when compiled with -fopenmp. */
static uint64_t _wyrand_seed=0;
#define WYRAND_MAX 0xffffffffffffffffull
static inline void wysrand(uint64_t seed){ _wyrand_seed=seed; }
static inline uint64_t wyrand_global(void){
  uint64_t s;
#if defined(_OPENMP)
#pragma omp atomic capture
#endif
  { _wyrand_seed += _wyp0; s = _wyrand_seed; }
  return _wymum(s^_wyp1,s);
}
#endif
rose_scan.c
#include<math.h> #include<string.h> #define N 16 #include "libxomp.h" int main(argc,argv) int argc; char **argv; { int status = 0; XOMP_init(argc,argv); int r; int b; int v; int a[16]; int simd_scan[16]; int scan_a; int scan_b; for (int i = 0; i < 16; i++) { a[i] = i; simd_scan[i] = 0; } scan_a = 0; scan_b = 10; #pragma omp simd reduction(inscan, + : scan_a) for (int i = 0; i < 16; i++) { simd_scan[i] = scan_a; #pragma omp scan exclusive(r,b,v) scan_a += a[i]; scan_b -= a[i]; } XOMP_terminate(status); return 0; }
ast-dump-openmp-for.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp for for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp for for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp for collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPForDirective {{.*}} <line:4:1, col:16> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' 
postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPForDirective {{.*}} <line:10:1, col:16> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt 
{{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPForDirective {{.*}} <line:17:1, col:28> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:17, col:27> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:26> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:26> 
'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} 
<line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPForDirective {{.*}} <line:24:1, col:28> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:17, col:27> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:26> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:26> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | 
|-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, 
col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPForDirective {{.*}} <line:31:1, col:28> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:17, col:27> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:26> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:26> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // 
CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
Example3.c
//#include <stdio.h>
//#include <omp.h>
//#include <conio.h>
//
//int main(int argc, char *argv[])
//{
//    int tid;
//#pragma omp parallel num_threads(4) shared(tid)
//    {
//#pragma omp single //Single structure is executed by any thread
//        {
//            tid = omp_get_thread_num();
//            printf("Single structure is executed by thread %d \n", tid);
//        }
//        /* A barrier is automatically added here */
//        printf("This code is executed by the thread %d.\n", omp_get_thread_num());
//    }
//    _getch(); // keeps the console window open (from the <conio.h> library)
//    return 0;
//}
zgesv.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 * Solves A*X = B for a general n-by-n matrix A and n-by-nrhs right-hand
 * sides, by LU factorization with partial pivoting (LAPACK-style zgesv).
 *
 * Negative return values follow the LAPACK convention: -k means the k-th
 * argument was illegal.
 *
 * Fixes versus the previous revision:
 *  - descriptor A was leaked when creating descriptor B failed;
 *  - the return values of plasma_sequence_init()/plasma_request_init()
 *    were assigned but never checked.
 ******************************************************************************/
int plasma_zgesv(int n, int nrhs,
                 plasma_complex64_t *pA, int lda, int *ipiv,
                 plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments (error code = negated argument index).
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_getrf(plasma, PlasmaComplexDouble, n, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        // Fix: A was previously leaked on this error path.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zgesv(A, ipiv, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
    }

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 * Asynchronous tile version: factors A (LU with partial pivoting), applies
 * the row swaps to B, then solves the two triangular systems L and U.
 * Errors are reported through the sequence/request pair, not return values.
 ******************************************************************************/
void plasma_omp_zgesv(plasma_desc_t A, int *ipiv, plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid B");
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Call the parallel functions.
    plasma_pzgetrf(A, ipiv, sequence, request);

    plasma_pzgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);

    plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                  1.0, A,
                       B,
                  sequence, request);

    plasma_pztrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);
}
axpy_openmp.c
//axpy.c -- benchmark of the BLAS level-1 AXPY kernel (Y += a*X).
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>

#define N_RUNS 1000
#define N 120000

/* Wall-clock time in seconds (millisecond resolution, via ftime). */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* Fill both N-element vectors with random values in [0, 10). */
void init(float *X, float *Y) {
    for (int i = 0; i<N; i++) {
        X[i] = (float)rand()/(float)(RAND_MAX/10.0);
        Y[i] = (float)rand()/(float)(RAND_MAX/10.0);
    }
}

/* Y[i] += a * X[i] for i in [0, N); vectorized with OpenMP simd. */
void axpy(float *X, float *Y, float a) {
#pragma omp simd
    for (int i = 0; i<N; i++) {
        Y[i] += a * X[i];
    }
}

int main(int argc, char **argv) {
    // Set everything up.
    float *X = malloc(sizeof(float)*N);
    float *Y = malloc(sizeof(float)*N);
    // Fix: allocation results were previously used unchecked.
    if (X == NULL || Y == NULL) {
        fprintf(stderr, "allocation of %d floats failed\n", N);
        free(X);
        free(Y);
        return 1;
    }
    float a = 3.14;
    srand(time(NULL));
    init(X, Y);

    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy(X, Y, a);
    double t = (read_timer() - start);

    // AXPY performs 2*N flops (one multiply, one add per element) per run.
    // Fix: the formula previously read ((2.0*N)*N*N_RUNS), an extra factor
    // of N that inflated the reported GFLOPS by 120000x.
    double gflops_serial = ((2.0 * N) * N_RUNS) / (1.0e9 * t);

    printf("==================================================================\n");
    printf("Performance:\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("AXPY (OpenMP):\t\t%4f\t%4f\n", t, gflops_serial);

    free(X);
    free(Y);
    return 0;
}
GB_Vector_extractElement.c
//------------------------------------------------------------------------------ // GB_Vector_extractElement: x = V(i) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Extract the value of single scalar, x = V(i), typecasting from the // type of V to the type of x, as needed. // Returns GrB_SUCCESS if V(i) is present, and sets x to its value. // Returns GrB_NO_VALUE if V(i) is not present, and x is unmodified. // This template constructs GrB_Vector_extractElement_[TYPE], for each of the // 13 built-in types, and the _UDT method for all user-defined types. // FUTURE: tolerate zombies GrB_Info GB_EXTRACT_ELEMENT // extract a single entry, x = V(i) ( GB_XTYPE *x, // scalar to extract, not modified if not found const GrB_Vector V, // vector to extract a scalar from GrB_Index i // index ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_RETURN_IF_NULL_OR_FAULTY (V) ; GB_RETURN_IF_NULL (x) ; // delete any lingering zombies, assemble any pending tuples, and unjumble if (GB_ANY_PENDING_WORK (V)) { GrB_Info info ; GB_WHERE1 (GB_WHERE_STRING) ; GB_BURBLE_START ("GrB_Vector_extractElement") ; GB_OK (GB_wait ((GrB_Matrix) V, "v", Context)) ; GB_BURBLE_END ; } ASSERT (!GB_ANY_PENDING_WORK (V)) ; // check index if (i >= V->vlen) { return (GrB_INVALID_INDEX) ; } // GB_XCODE and V must be compatible GB_Type_code vcode = V->type->code ; if (!GB_code_compatible (GB_XCODE, vcode)) { return (GrB_DOMAIN_MISMATCH) ; } if (GB_nnz ((GrB_Matrix) V) == 0) { // quick return return (GrB_NO_VALUE) ; } //-------------------------------------------------------------------------- // find the entry V(i) 
//-------------------------------------------------------------------------- int64_t pleft ; bool found ; const int64_t *restrict Vp = V->p ; if (Vp != NULL) { // V is sparse const int64_t *restrict Vi = V->i ; pleft = 0 ; int64_t pright = Vp [1] - 1 ; // binary search for index i // Time taken for this step is at most O(log(nnz(V))). GB_BINARY_SEARCH (i, Vi, pleft, pright, found) ; } else { // V is bitmap or full pleft = i ; const int8_t *restrict Vb = V->b ; if (Vb != NULL) { // V is bitmap found = (Vb [pleft] == 1) ; } else { // V is full found = true ; } } //-------------------------------------------------------------------------- // extract the element //-------------------------------------------------------------------------- if (found) { #if !defined ( GB_UDT_EXTRACT ) if (GB_XCODE == vcode) { // copy the value from V [...] into the scalar x, no typecasting, // for built-in types only. GB_XTYPE *restrict Vx = ((GB_XTYPE *) (V->x)) ; (*x) = Vx [V->iso ? 0:pleft] ; } else #endif { // typecast the value from V [...] into the scalar x size_t vsize = V->type->size ; void *vx = ((GB_void *) V->x) + (V->iso ? 0 : (pleft*vsize)) ; GB_cast_scalar (x, GB_XCODE, vx, vcode, vsize) ; } // TODO: do not flush if extracting to GrB_Scalar #pragma omp flush return (GrB_SUCCESS) ; } else { // Entry not found. return (GrB_NO_VALUE) ; } } #undef GB_UDT_EXTRACT #undef GB_EXTRACT_ELEMENT #undef GB_XTYPE #undef GB_XCODE
GB_binop__isle_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int64) // A.*B function (eWiseMult): GB (_AemultB_08__isle_int64) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int64) // A.*B function (eWiseMult): GB (_AemultB_04__isle_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int64) // A*D function (colscale): GB (_AxD__isle_int64) // D*A function (rowscale): GB (_DxB__isle_int64) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int64) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int64) // C=scalar+B GB (_bind1st__isle_int64) // C=scalar+B' GB (_bind1st_tran__isle_int64) // C=A+scalar GB (_bind2nd__isle_int64) // C=A'+scalar GB (_bind2nd_tran__isle_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 
0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT64 || GxB_NO_ISLE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
//------------------------------------------------------------------------------
// NOTE(review): this chunk appears to be machine-generated SuiteSparse:
// GraphBLAS "factory kernel" code for the ISLE binary operator, z = (x <= y),
// with int64_t inputs/outputs (the comparisons below use <= directly) --
// TODO confirm against the GraphBLAS generator templates.  If confirmed,
// any fix belongs in the generator/template files, not in this file.
// Also note the file header (ImageMagick cache.c) does not match this
// content; the filename/metadata join is likely wrong.
//
// Conventions visible in this chunk:
//   - GB (name) is a name-mangling macro defined elsewhere.
//   - Each kernel body is produced by #include-ing a template .c file,
//     parameterized by macros (GB_DISABLE, GB_FLIPPED, GB_ATYPE, GB_CAST_OP,
//     GBB, GBX, GB_WERK_DECLARE, GB_FREE_WORKSPACE) defined outside this view.
//   - When GB_DISABLE is nonzero the kernel compiles to "return GrB_NO_VALUE".
//------------------------------------------------------------------------------

// NOTE(review): "GB ((none))" is not a valid C identifier if this definition
// is ever compiled; this stub and its trailing #endif are presumably inside a
// preprocessor conditional opened before this chunk (a disabled variant of
// the dense ewise3-with-accumulator kernel) -- confirm against the full file.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// All work is done by the included template; nthreads is forwarded to it.
void GB (_Cdense_ewise3_noaccum__isle_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// B_ek_slicing/B_ntasks/B_nthreads describe how B's entries are partitioned
// across tasks; the template consumes them.
GrB_Info GB (_Cdense_accumB__isle_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// p_bwork points to the scalar b, passed as untyped GB_void and reinterpreted
// as int64_t below.
GrB_Info GB (_Cdense_accumb__isle_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the block above always returns first.
    // Harmless generator artifact; fix in the generator if at all.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isle_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx aliases C's value array for the template; restrict asserts no
    // aliasing with the template's other pointers.
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// is_eWiseUnion selects the union variant, in which alpha/beta scalars stand
// in for entries missing from A or B respectively; they are only read when
// is_eWiseUnion is true.  C_to_M/C_to_A/C_to_B and TaskList describe the
// precomputed mapping/partition consumed by GB_add_template.c.
GrB_Info GB (_AaddB__isle_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here so GB_FREE_WORKSPACE below can release it
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isle_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// flipxy is only consulted when GB_BINOP_FLIP is set (operator has no
// pre-flipped variant); otherwise the flip was resolved before this call.
GrB_Info GB (_AemultB_02__isle_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isle_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = (x <= Bx [p]) for every present entry p of B.
// Bb is B's bitmap (GBB tests presence); bnz is the size of the value array.
GrB_Info GB (_bind1st__isle_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    // each iteration touches only entry p, so a static schedule is safe
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: Cx [p] = (Ax [p] <= y) for every present entry of A.
GrB_Info GB (_bind2nd__isle_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry hook consumed by GB_unop_transpose.c below.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isle_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for subsequent kernels (redundant here since the
    // value is unchanged -- generator artifact)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isle_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// NOTE(review): closes a preprocessor conditional opened before this chunk.
#endif