source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by the composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function : f(Sc,Dc) where Sc and Dc are the normalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color divided by Source alpha
Dca = Dc*Da normalized Dest color divided by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' is in the following formula as 'gamma'. The resulting alpha value.
Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also dictate that Mathematical Composition
methods should use a 'Over' blending mode for Alpha Channel.
It however was not applied for composition modes of 'Plus', 'Minus',
the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (synchronize channel updates)
is turned off (enabled by default) then mathematical compositions are
only performed on the channels specified, and are applied
independently of each other. In other words the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    blue_component,
    green_component,
    luma_offset,
    ramp,
    red_component,
    scaled_hue,
    spread;

  /*
    Convert an HCL triplet to RGB: select the two dominant channel
    contributions (the chroma and its ramp) from the hue sextant, then shift
    all three channels by a luma offset and scale to the quantum range.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  scaled_hue=6.0*hue;  /* hue mapped onto sextants [0,6) */
  spread=chroma;
  ramp=spread*(1.0-fabs(fmod(scaled_hue,2.0)-1.0));
  red_component=0.0;
  green_component=0.0;
  blue_component=0.0;
  if ((scaled_hue >= 0.0) && (scaled_hue < 6.0))
    switch ((int) scaled_hue)
    {
      case 0:
      {
        red_component=spread;
        green_component=ramp;
        break;
      }
      case 1:
      {
        red_component=ramp;
        green_component=spread;
        break;
      }
      case 2:
      {
        green_component=spread;
        blue_component=ramp;
        break;
      }
      case 3:
      {
        green_component=ramp;
        blue_component=spread;
        break;
      }
      case 4:
      {
        red_component=ramp;
        blue_component=spread;
        break;
      }
      case 5:
      {
        red_component=spread;
        blue_component=ramp;
        break;
      }
      default:
        break;
    }
  /*
    Rec. 601 luma weights recover the achromatic offset.
  */
  luma_offset=luma-(0.298839*red_component+0.586811*green_component+
    0.114350*blue_component);
  *red=QuantumRange*(red_component+luma_offset);
  *green=QuantumRange*(green_component+luma_offset);
  *blue=QuantumRange*(blue_component+luma_offset);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    b,
    g,
    maximum,
    minimum,
    r,
    sextant;

  /*
    Convert an RGB triplet to HCL: chroma is the channel spread, hue is the
    position within the sextant of the dominant channel, and luma is the
    Rec. 601 weighted sum of the channels.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  r=red;
  g=green;
  b=blue;
  maximum=r;
  if (g > maximum)
    maximum=g;
  if (b > maximum)
    maximum=b;
  minimum=r;
  if (g < minimum)
    minimum=g;
  if (b < minimum)
    minimum=b;
  sextant=0.0;
  if ((maximum-minimum) != 0.0)
    {
      /*
        Hue angle, expressed in sextants, relative to the maximum channel.
      */
      if (red == maximum)
        sextant=fmod((g-b)/(maximum-minimum)+6.0,6.0);
      else
        if (green == maximum)
          sextant=((b-r)/(maximum-minimum))+2.0;
        else
          if (blue == maximum)
            sextant=((r-g)/(maximum-minimum))+4.0;
    }
  *hue=(sextant/6.0);
  *chroma=QuantumScale*(maximum-minimum);
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
/*
  CompositeOverImage() composites source_image onto image at the given offset
  using the Porter-Duff 'over' operator:

    Dca' = Sca + Dca*(1-Sa),  Da' = Sa + Da - Sa*Da

  When clip_to_self is MagickTrue only the overlapped region of the canvas is
  touched; otherwise canvas pixels outside the overlay get the source's
  virtual pixels (alpha forced to transparent).  Returns MagickTrue on
  success, MagickFalse on a cache failure or when the progress monitor
  requests cancellation.
*/
static MagickBooleanType CompositeOverImage(Image *image,
  const Image *source_image,const MagickBooleanType clip_to_self,
  const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *image_view,
    *source_view;

  const char
    *value;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Composite image.  The "compose:clamp" artifact selects ClampPixel()
    (HDRI-aware) over ClampToQuantum() when storing results.
  */
  status=MagickTrue;
  progress=0;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsStringTrue(value);
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(Quantum *) NULL;
    p=(Quantum *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /*
          For a negative x offset the first x_offset canvas columns are
          outside the overlay; advance p so it aligns with column 0.
        */
        if (x_offset < 0)
          p-=x_offset*GetPixelChannels(source_image);
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      MagickRealType
        alpha,
        Da,
        Dc,
        Dca,
        Sa,
        Sc,
        Sca;

      register ssize_t
        i;

      size_t
        channels;

      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          Quantum
            source[MaxPixelChannels];

          /*
            Virtual composite:
              Sc: source color.
              Dc: canvas color.
          */
          (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
            exception);
          if (GetPixelWriteMask(image,q) == 0)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            MagickRealType
              pixel;

            PixelChannel channel=GetPixelChannelChannel(image,i);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            PixelTrait source_traits=GetPixelChannelTraits(source_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (source_traits == UndefinedPixelTrait))
              continue;
            /*
              Outside the overlay the canvas keeps its color but becomes
              fully transparent.
            */
            if (channel == AlphaPixelChannel)
              pixel=(MagickRealType) TransparentAlpha;
            else
              pixel=(MagickRealType) q[i];
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
          }
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Authentic composite:
          Sa: normalized source alpha.
          Da: normalized canvas alpha.
      */
      Sa=QuantumScale*GetPixelAlpha(source_image,p);
      Da=QuantumScale*GetPixelAlpha(image,q);
      alpha=Sa+Da-Sa*Da;
      if (GetPixelWriteMask(image,q) == 0)
        {
          p+=GetPixelChannels(source_image);
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        MagickRealType
          pixel;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((source_traits == UndefinedPixelTrait) &&
            (channel != AlphaPixelChannel))
          continue;
        if (channel == AlphaPixelChannel)
          {
            /*
              Set alpha channel.
            */
            pixel=QuantumRange*alpha;
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
            continue;
          }
        /*
          Sc: source color.
          Dc: canvas color.
        */
        Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
        Dc=(MagickRealType) q[i];
        if ((traits & CopyPixelTrait) != 0)
          {
            /*
              Copy channel.
            */
            q[i]=Sc;
            continue;
          }
        /*
          Porter-Duff compositions:
            Sca: source normalized color multiplied by alpha.
            Dca: normalized canvas color multiplied by alpha.
        */
        Sca=QuantumScale*Sa*Sc;
        Dca=QuantumScale*Da*Dc;
        gamma=PerceptibleReciprocal(alpha);
        pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
        q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(source_image);
      /*
        Wrap the source pointer so columns beyond the overlay re-read the
        row's pixels rather than run off the end of the cache row.
      */
      channels=GetPixelChannels(source_image);
      if (p >= (pixels+channels*source_image->columns))
        p=pixels;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CompositeImage)
#endif
        proceed=SetImageProgress(image,CompositeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite != (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(source_image,p) == 0)
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if (traits == UndefinedPixelTrait)
continue;
if (source_traits != UndefinedPixelTrait)
SetPixelChannel(image,channel,p[i],q);
else if (channel == AlphaPixelChannel)
SetPixelChannel(image,channel,OpaqueAlpha,q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,
(MagickOffsetType) y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) == 0)
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,
(MagickOffsetType) y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling.
Blur Image dictated by an overlay gradient map: X = red_channel;
Y = green_channel; compose:args = x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
Users input sigma now needs to be converted to the EWA ellipse size.
The filter defaults to a sigma of 0.5 so to make this match the
users input the ellipse size needs to be doubled.
*/
width=height=geometry_info.rho*2.0;
if ((flags & HeightValue) != 0 )
height=geometry_info.sigma*2.0;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
/* rotate vectors if a rotation angle is given */
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
/* Otherwise lets set a angle range and calculate in the loop */
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a gaussian cylindrical filter for EWA Bluring.
As the minimum ellipse radius of support*1.0 the EWA algorithm
can only produce a minimum blur of 0.5 for Gaussian (support=2.0)
This means that even 'No Blur' will be still a little blurry!
The solution (as well as the problem of preventing any user
expert filter settings, is to set our own user settings, then
restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/* do the variable blurring of each pixel in image */
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs((double) angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
#if 0
if ( x == 10 && y == 60 ) {
(void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1,
blur.x2,blur.y1, blur.y2);
(void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale*
GetPixelRed(p),QuantumScale*GetPixelGreen(p));
#endif
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided,
displacement/distortion map. -- Like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
(void) InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from "compose:args", setting.
Unused values are set to zero automagically.
Arguments are normally a comma separated list, so this probably should
be changed to some 'general comma list' parser, (with a minimum
number of values)
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
Sa,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
{
alpha=GetPixelIntensity(source_image,p)*Sa;
break;
}
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case LightenCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case ModulusAddCompositeOp:
case ModulusSubtractCompositeOp:
case MultiplyCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ScreenCompositeOp:
case SoftLightCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
default:
{
alpha=1.0;
break;
}
}
if (GetPixelWriteMask(image,q) == 0)
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((source_traits == UndefinedPixelTrait) &&
(channel != AlphaPixelChannel))
continue;
if (channel == AlphaPixelChannel)
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case ChangeMaskCompositeOp:
{
MagickBooleanType
equivalent;
if (Da < 0.5)
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q);
if (equivalent != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) OpaqueAlpha;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if (source_image->alpha_trait == UndefinedPixelTrait)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case LightenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=Sc;
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-Dca/Da)*Sa/
Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*
(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) (QuantumRange-
GetPixelBlack(source_image,p));
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa/Sca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case LinearBurnCompositeOp:
{
/*
            LinearBurn: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
            LinearLight: as defined by Adobe Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
'Mathematics' a free form user control mathematical composition
is defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite
            as a comma separated 'geometry' string in "compose:args" image
artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
f(Sc,Dc) = Sc - Dc
*/
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
pixel=Sc+Dc;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
break;
}
case ModulusSubtractCompositeOp:
{
pixel=Sc-Dc;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sca);
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
pixel=QuantumRange*(Sca+Dca);
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-(Dca/Da)))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*(Dca/Da)*
(4.0*(Dca/Da)+1.0)*((Dca/Da)-1.0)+7.0*(Dca/Da))+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow((Dca/Da),0.5)-
(Dca/Da))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*
(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*
(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image * ) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture_image: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /*
    Tiling writes every pixel of the canvas, so it must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Work on a private clone of the texture: it is recolored to the canvas
    colorspace and switched to tiled virtual pixels below, and destroyed
    before returning.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
      (image->alpha_trait != UndefinedPixelTrait) ||
      (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        General path: the compose operator (or alpha involvement) requires
        full CompositeImage() semantics, so tile by repeated composition,
        one texture-sized tile per iteration.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* clip_to_self=MagickTrue; tile_offset shifts the tile origin */
          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* report completion before returning from this path */
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Fast path: a plain copy (or an over-composite with no alpha on either
    side) reduces to copying texture pixels channel by channel, one canvas
    row per loop iteration.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(texture_image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one texture row; the modulo wraps the row index so the texture
      repeats vertically (horizontal repetition comes from the tile virtual
      pixel method set above).
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      register ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      /* clip the final tile so it does not run past the right edge */
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        /* skip pixels protected by the write mask, but keep both pixel
           pointers advancing in lock step */
        if (GetPixelWriteMask(image,q) == 0)
          {
            p+=GetPixelChannels(texture_image);
            q+=GetPixelChannels(image);
            continue;
          }
        /* copy every channel that both images define */
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel=GetPixelChannelChannel(texture_image,i);
          PixelTrait traits=GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
|
GB_unaryop__ainv_uint32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_uint16
// op(A') function: GB_tran__ainv_uint32_uint16
// C type: uint32_t
// A type: uint16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the AINV (additive inverse) unary operator,
// typecasting uint16_t inputs to uint32_t outputs.
GrB_Info GB_unop__ainv_uint32_uint16
(
    uint32_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out of the library
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // aij = Ax [k], typecast to uint32_t, then negate; unsigned
        // negation wraps modulo 2^32
        uint16_t aij = Ax [k] ;
        uint32_t zij = (uint32_t) aij ;
        Cx [k] = -zij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the AINV unary operator
GrB_Info GB_tran__ainv_uint32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; the caller falls
    // back to the generic worker
    return (GrB_NO_VALUE) ;
    #else
    // numerical (second) phase of the transpose; the loop lives in the
    // shared template, driven by the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__isne_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int64)
// A*D function (colscale): GB (_AxD__isne_int64)
// D*A function (rowscale): GB (_DxB__isne_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int64)
// C=scalar+B GB (_bind1st__isne_int64)
// C=scalar+B' GB (_bind1st_tran__isne_int64)
// C=A+scalar GB (_bind2nd__isne_int64)
// C=A'+scalar GB (_bind2nd_tran__isne_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT64 || GxB_NO_ISNE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// This kernel is compiled out (#if 0): the ewise3-accum template only supports
// the accumulators listed below, and ISNE is not one of them, so the code
// generator emitted no variant for this operator.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The entire loop nest comes from the included template, specialized by the
// GB_* macros defined above (GB_BINOP computes cij = (aij != bij)).
void GB (_Cdense_ewise3_noaccum__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Returns GrB_NO_VALUE when the kernel is disabled at compile time
// (GB_DISABLE, defined above); the caller then falls back to a generic kernel.
GrB_Info GB (_Cdense_accumB__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the template uses B_ek_slicing/B_ntasks/B_nthreads to parallelize over B
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns; kept by the code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias for C's value array; the template writes results through Cx
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias for C's value array; the template writes results through Cx
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slicings, freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read (and only valid) in the eWiseUnion case
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for this operator (see above), so only the #else branch
// below is compiled; the flipxy argument is then effectively unused.
GrB_Info GB (_AemultB_02__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// parallelized over the mask M (M_ek_slicing/M_ntasks/M_nthreads)
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For every entry present in B, compute Cx [p] = (x != Bx [p]).  Entries
// absent from the bitmap Bb are left untouched.
GrB_Info GB (_bind1st__isne_int64)
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays, plus the bound scalar x
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only touch positions that hold an entry of B
if (GBB (Bb, p))
{
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For every entry present in A, compute Cx [k] = (Ax [k] != y).  Entries
// absent from the bitmap Ab are left untouched.
GrB_Info GB (_bind2nd__isne_int64)
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped arrays, plus the bound scalar y
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only touch positions that hold an entry of A
if (GBB (Ab, k))
{
int64_t aij = GBX (Ax, k, false) ;
Cx [k] = (aij != y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bound scalar x used by GB_CAST_OP above
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this kernel
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bound scalar y used by GB_CAST_OP above
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_combinado.c | /******************************************************************************
* OpenMP Example - Combined Parallel Loop Work-sharing - C/C++ Version
* FILE: omp_workshare3.c
* DESCRIPTION:
* This example attempts to show use of the parallel for construct. However
* it will generate errors at compile time. Try to determine what is causing
* the error. See omp_workshare4.c for a corrected version.
* SOURCE: Blaise Barney 5/99
* LAST REVISED:
******************************************************************************/
#include <stdio.h>
#include <omp.h>
#define N 50
#define CHUNK 5
int main () {
int i, n, chunk, tid;
float a[N], b[N], c[N];
/* Some initializations */
for (i=0; i < N; i++)
a[i] = b[i] = i * 1.0;
n = N;
chunk = CHUNK;
/* NOTE (intentional error, per the file header): "#pragma omp parallel for"
is a combined work-sharing construct and must be immediately followed by a
for-loop, NOT by a compound statement block.  The braces below therefore
cause a compile-time error.  The fix (see omp_workshare4.c) is to use a
plain "#pragma omp parallel" around the block, with "#pragma omp for"
directly on the loop. */
#pragma omp parallel for \
shared(a,b,c,n) \
private(i,tid) \
schedule(static,chunk)
{
tid = omp_get_thread_num();
for (i=0; i < n; i++)
{
c[i] = a[i] + b[i];
printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
}
} /* end of parallel for construct */
}
|
Matrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matrix - Matrix stored and accessible by rows. Indices and values for
* the matrix nonzeros are copied into the matrix a row at a time, in any
* order using the MatrixGetRow function. The MatrixPutRow function returns
* a pointer to the indices and values of a row. The matrix has a set of
* row and column indices such that these indices begin at "beg" and end
* at "end", where 0 <= "beg" <= "end". In other words, the matrix indices
* have any nonnegative base value, and the base values of the row and column
* indices must agree.
*
*****************************************************************************/
#include <stdlib.h>
#include <memory.h>
#include "Common.h"
#include "Matrix.h"
#include "Numbering.h"
#define MAX_NZ_PER_ROW 1000
/*--------------------------------------------------------------------------
* MatrixCreate - Return (a pointer to) a matrix object.
* Allocates per-row storage for rows [beg_row..end_row] and gathers every
* rank's row range so rows can later be mapped to processors (MatrixRowPe).
* Communication fields are zeroed here and filled in by MatrixComplete.
*--------------------------------------------------------------------------*/
Matrix *MatrixCreate(MPI_Comm comm, HYPRE_Int beg_row, HYPRE_Int end_row)
{
HYPRE_Int num_rows, mype, npes;
Matrix *mat = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST);
mat->comm = comm;
mat->beg_row = beg_row;
mat->end_row = end_row;
mat->mem = (Mem *) MemCreate();
num_rows = mat->end_row - mat->beg_row + 1;
mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int));
mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *));
mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *));
/* Send beg_row and end_row to all processors */
/* This is needed in order to map row numbers to processors */
hypre_MPI_Comm_rank(comm, &mype);
hypre_MPI_Comm_size(comm, &npes);
mat->beg_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int));
mat->end_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int));
hypre_MPI_Allgather(&beg_row, 1, HYPRE_MPI_INT, mat->beg_rows, 1, HYPRE_MPI_INT, comm);
hypre_MPI_Allgather(&end_row, 1, HYPRE_MPI_INT, mat->end_rows, 1, HYPRE_MPI_INT, comm);
/* communication state is set up later, in MatrixComplete */
mat->num_recv = 0;
mat->num_send = 0;
mat->recv_req = NULL;
mat->send_req = NULL;
mat->recv_req2 = NULL;
mat->send_req2 = NULL;
mat->statuses = NULL;
mat->sendind = NULL;
mat->sendbuf = NULL;
mat->recvbuf = NULL;
mat->numb = NULL;
return mat;
}
/*--------------------------------------------------------------------------
* MatrixCreateLocal - Return (a pointer to) a matrix object.
* The matrix created by this call is a local matrix, not a global matrix:
* no communicator is attached and no row-range exchange is performed.
*--------------------------------------------------------------------------*/
Matrix *MatrixCreateLocal(HYPRE_Int beg_row, HYPRE_Int end_row)
{
HYPRE_Int num_rows;
Matrix *mat = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST);
mat->comm = hypre_MPI_COMM_NULL;
mat->beg_row = beg_row;
mat->end_row = end_row;
mat->mem = (Mem *) MemCreate();
num_rows = mat->end_row - mat->beg_row + 1;
mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int));
mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *));
mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *));
/* Send beg_row and end_row to all processors */
/* This is needed in order to map row numbers to processors */
/* (not done for a local matrix: no other processors involved) */
mat->beg_rows = NULL;
mat->end_rows = NULL;
mat->num_recv = 0;
mat->num_send = 0;
mat->recv_req = NULL;
mat->send_req = NULL;
mat->recv_req2 = NULL;
mat->send_req2 = NULL;
mat->statuses = NULL;
mat->sendind = NULL;
mat->sendbuf = NULL;
mat->recvbuf = NULL;
mat->numb = NULL;
return mat;
}
/*--------------------------------------------------------------------------
* MatrixDestroy - Destroy a matrix object "mat".
* Frees the persistent MPI requests, the communication buffers, the row
* storage pool (via MemDestroy) and the numbering object.
*--------------------------------------------------------------------------*/
void MatrixDestroy(Matrix *mat)
{
HYPRE_Int i;
for (i=0; i<mat->num_recv; i++)
hypre_MPI_Request_free(&mat->recv_req[i]);
for (i=0; i<mat->num_send; i++)
hypre_MPI_Request_free(&mat->send_req[i]);
/* note: recv_req2 holds num_send requests and send_req2 holds num_recv
requests -- these are the transpose-multiply channels set up in
SetupSends and SetupReceives, so the loop bounds below are correct */
for (i=0; i<mat->num_send; i++)
hypre_MPI_Request_free(&mat->recv_req2[i]);
for (i=0; i<mat->num_recv; i++)
hypre_MPI_Request_free(&mat->send_req2[i]);
free(mat->recv_req);
free(mat->send_req);
free(mat->recv_req2);
free(mat->send_req2);
free(mat->statuses);
free(mat->sendind);
free(mat->sendbuf);
free(mat->recvbuf);
/* releases lens/inds/vals and all per-row allocations at once */
MemDestroy(mat->mem);
if (mat->numb)
NumberingDestroy(mat->numb);
free(mat);
}
/*--------------------------------------------------------------------------
* MatrixSetRow - Set a row in a matrix. Only local rows can be set.
* Once a row has been set, it should not be set again, or else the
* memory used by the existing row will not be recovered until
* the matrix is destroyed. "row" is in global coordinate numbering.
* "ind" and "val" (if non-NULL) are copied; the caller keeps ownership.
*--------------------------------------------------------------------------*/
void MatrixSetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int len, HYPRE_Int *ind, HYPRE_Real *val)
{
/* convert global row number to local index */
row -= mat->beg_row;
mat->lens[row] = len;
mat->inds[row] = (HYPRE_Int *) MemAlloc(mat->mem, len*sizeof(HYPRE_Int));
mat->vals[row] = (HYPRE_Real *) MemAlloc(mat->mem, len*sizeof(HYPRE_Real));
if (ind != NULL)
hypre_TMemcpy(mat->inds[row], ind, HYPRE_Int, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
if (val != NULL)
hypre_TMemcpy(mat->vals[row], val, HYPRE_Real, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
}
/*--------------------------------------------------------------------------
* MatrixGetRow - Get a *local* row in a matrix.
* Returns pointers into the matrix's own storage; nothing is copied.
*--------------------------------------------------------------------------*/
void MatrixGetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int *lenp, HYPRE_Int **indp, HYPRE_Real **valp)
{
HYPRE_Int *row_ind = mat->inds[row];
HYPRE_Real *row_val = mat->vals[row];
*valp = row_val;
*indp = row_ind;
*lenp = mat->lens[row];
}
/*--------------------------------------------------------------------------
* MatrixRowPe - Map "row" to a processor number.
* Scans each rank's [beg,end] row range; aborts if the row maps nowhere.
*--------------------------------------------------------------------------*/
HYPRE_Int MatrixRowPe(Matrix *mat, HYPRE_Int row)
{
HYPRE_Int npes, p;
const HYPRE_Int *beg = mat->beg_rows;
const HYPRE_Int *end = mat->end_rows;
hypre_MPI_Comm_size(mat->comm, &npes);
for (p=0; p<npes; p++)
{
if (beg[p] <= row && row <= end[p])
return p;
}
hypre_printf("MatrixRowPe: could not map row %d.\n", row);
PARASAILS_EXIT;
return -1; /* for picky compilers */
}
/*--------------------------------------------------------------------------
* MatrixNnz - Return total number of nonzeros in preconditioner.
* Sums the local row lengths, then reduces across all ranks (collective:
* every rank must call this).
*--------------------------------------------------------------------------*/
HYPRE_Int MatrixNnz(Matrix *mat)
{
HYPRE_Int num_local, i, total, alltotal;
num_local = mat->end_row - mat->beg_row + 1;
total = 0;
for (i=0; i<num_local; i++)
total += mat->lens[i];
hypre_MPI_Allreduce(&total, &alltotal, 1, HYPRE_MPI_INT, hypre_MPI_SUM, mat->comm);
return alltotal;
}
/*--------------------------------------------------------------------------
* MatrixPrint - Print a matrix to a file "filename". Each processor
* appends to the file in order, but the file is overwritten if it exists.
* Collective: all ranks take turns, serialized by the barrier below.
* Indices are printed in global numbering via numb->local_to_global, so
* MatrixComplete must have been called first.
*--------------------------------------------------------------------------*/
void MatrixPrint(Matrix *mat, char *filename)
{
HYPRE_Int mype, npes, pe;
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val;
hypre_MPI_Comm_rank(mat->comm, &mype);
hypre_MPI_Comm_size(mat->comm, &npes);
for (pe=0; pe<npes; pe++)
{
hypre_MPI_Barrier(mat->comm);
if (mype == pe)
{
/* rank 0 truncates the file; later ranks append in rank order */
FILE *file = fopen(filename, (pe==0 ? "w" : "a"));
hypre_assert(file != NULL);
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
for (i=0; i<len; i++)
hypre_fprintf(file, "%d %d %.14e\n",
row + mat->beg_row,
mat->numb->local_to_global[ind[i]], val[i]);
}
fclose(file);
}
}
}
/*--------------------------------------------------------------------------
* MatrixReadMaster - MatrixRead routine for processor 0. Internal use.
* Pass 1: scan the whole file, recording the byte offset at which each
* other rank's first row starts, and send that offset to the rank.
* Pass 2: rewind and read processor 0's own rows into the matrix.
* Requires the file to be sorted by row (checked below).
*--------------------------------------------------------------------------*/
static void MatrixReadMaster(Matrix *mat, char *filename)
{
MPI_Comm comm = mat->comm;
HYPRE_Int mype, npes;
FILE *file;
HYPRE_Int ret;
HYPRE_Int num_rows, curr_proc;
HYPRE_Int row, col;
HYPRE_Real value;
hypre_longint offset;
hypre_longint outbuf;
HYPRE_Int curr_row;
HYPRE_Int len;
HYPRE_Int ind[MAX_NZ_PER_ROW];
HYPRE_Real val[MAX_NZ_PER_ROW];
char line[100];
HYPRE_Int oldrow;
hypre_MPI_Request request;
hypre_MPI_Status status;
hypre_MPI_Comm_size(mat->comm, &npes);
hypre_MPI_Comm_rank(mat->comm, &mype);
file = fopen(filename, "r");
hypre_assert(file != NULL);
/* header line: number of rows (format differs under EMSOLVE) */
fgets(line, 100, file);
#ifdef EMSOLVE
ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
for (row=0; row<num_rows; row++)
hypre_fscanf(file, "%*d");
#else
ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif
/* pass 1: find each rank's starting file offset and ship it over */
offset = ftell(file);
hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
request = hypre_MPI_REQUEST_NULL;
curr_proc = 1; /* proc for which we are looking for the beginning */
while (curr_proc < npes)
{
if (row == mat->beg_rows[curr_proc])
{
/* wait for the previous Isend of outbuf before reusing it */
hypre_MPI_Wait(&request, &status);
outbuf = offset;
hypre_MPI_Isend(&outbuf, 1, hypre_MPI_LONG, curr_proc, 0, comm, &request);
curr_proc++;
}
offset = ftell(file);
oldrow = row;
hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
if (oldrow > row)
{
hypre_fprintf(stderr, "Matrix file is not sorted by rows.\n");
PARASAILS_EXIT;
}
}
/* Now read our own part */
rewind(file);
fgets(line, 100, file);
#ifdef EMSOLVE
ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
for (row=0; row<num_rows; row++)
hypre_fscanf(file, "%*d");
#else
ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
curr_row = row;
len = 0;
/* pass 2: accumulate (col,value) pairs per row, flushing on row change */
while (ret != EOF && row <= mat->end_row)
{
if (row != curr_row)
{
/* store this row */
MatrixSetRow(mat, curr_row, len, ind, val);
curr_row = row;
/* reset row pointer */
len = 0;
}
if (len >= MAX_NZ_PER_ROW)
{
hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
hypre_fprintf(stderr, "increased to continue.\n");
PARASAILS_EXIT;
}
ind[len] = col;
val[len] = value;
len++;
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
}
/* Store the final row */
if (ret == EOF || row > mat->end_row)
MatrixSetRow(mat, mat->end_row, len, ind, val);
fclose(file);
/* make sure the last offset send has completed before returning */
hypre_MPI_Wait(&request, &status);
}
/*--------------------------------------------------------------------------
* MatrixReadSlave - MatrixRead routine for other processors. Internal use.
* Receives from rank 0 the byte offset where this rank's rows begin, seeks
* there, and reads rows until reaching end_row (same accumulation loop as
* MatrixReadMaster's pass 2).
*--------------------------------------------------------------------------*/
static void MatrixReadSlave(Matrix *mat, char *filename)
{
MPI_Comm comm = mat->comm;
hypre_MPI_Status status;
HYPRE_Int mype;
FILE *file;
HYPRE_Int ret;
HYPRE_Int row, col;
HYPRE_Real value;
hypre_longint offset;
HYPRE_Int curr_row;
HYPRE_Int len;
HYPRE_Int ind[MAX_NZ_PER_ROW];
HYPRE_Real val[MAX_NZ_PER_ROW];
HYPRE_Real time0, time1;
file = fopen(filename, "r");
hypre_assert(file != NULL);
hypre_MPI_Comm_rank(mat->comm, &mype);
/* rank 0 tells us where our rows start in the file */
hypre_MPI_Recv(&offset, 1, hypre_MPI_LONG, 0, 0, comm, &status);
time0 = hypre_MPI_Wtime();
ret = fseek(file, offset, SEEK_SET);
hypre_assert(ret == 0);
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
curr_row = row;
len = 0;
/* accumulate (col,value) pairs per row, flushing on row change */
while (ret != EOF && row <= mat->end_row)
{
if (row != curr_row)
{
/* store this row */
MatrixSetRow(mat, curr_row, len, ind, val);
curr_row = row;
/* reset row pointer */
len = 0;
}
if (len >= MAX_NZ_PER_ROW)
{
hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
hypre_fprintf(stderr, "increased to continue.\n");
PARASAILS_EXIT;
}
ind[len] = col;
val[len] = value;
len++;
ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
}
/* Store the final row */
if (ret == EOF || row > mat->end_row)
MatrixSetRow(mat, mat->end_row, len, ind, val);
fclose(file);
time1 = hypre_MPI_Wtime();
hypre_printf("%d: Time for slave read: %f\n", mype, time1-time0);
}
/*--------------------------------------------------------------------------
* MatrixRead - Read a matrix file "filename" from disk and store in the
* matrix "mat" which has already been created using MatrixCreate. The format
* assumes no nonzero rows, the rows are in order, and there will be at least
* one row per processor.  Collective: rank 0 scans the file and distributes
* offsets; the call finishes by building the communication pattern.
*--------------------------------------------------------------------------*/
void MatrixRead(Matrix *mat, char *filename)
{
HYPRE_Int mype;
HYPRE_Real t_begin, t_end;
hypre_MPI_Comm_rank(mat->comm, &mype);
t_begin = hypre_MPI_Wtime();
if (mype != 0)
MatrixReadSlave(mat, filename);
else
MatrixReadMaster(mat, filename);
t_end = hypre_MPI_Wtime();
hypre_printf("%d: Time for reading matrix: %f\n", mype, t_end-t_begin);
MatrixComplete(mat);
}
/*--------------------------------------------------------------------------
* RhsRead - Read a right-hand side file "filename" from disk and store in the
* location pointed to by "rhs". "mat" is needed to provide the partitioning
* information. The expected format is: a header line (n, nrhs) followed
* by n values. Also allows isis format, indicated by 1 HYPRE_Int in first line.
* Rank 0 reads the entire file and sends each other rank its slice; the
* other ranks simply block on a receive and return.
*--------------------------------------------------------------------------*/
void RhsRead(HYPRE_Real *rhs, Matrix *mat, char *filename)
{
FILE *file;
hypre_MPI_Status status;
HYPRE_Int mype, npes;
HYPRE_Int num_rows, num_local, pe, i, converted;
HYPRE_Real *buffer = NULL;
HYPRE_Int buflen = 0;
char line[100];
HYPRE_Int dummy;
hypre_MPI_Comm_size(mat->comm, &npes);
hypre_MPI_Comm_rank(mat->comm, &mype);
num_local = mat->end_row - mat->beg_row + 1;
/* non-root ranks: receive our slice from rank 0 and we are done */
if (mype != 0)
{
hypre_MPI_Recv(rhs, num_local, hypre_MPI_REAL, 0, 0, mat->comm, &status);
return;
}
file = fopen(filename, "r");
hypre_assert(file != NULL);
fgets(line, 100, file);
/* converted == 1 means only one header field parsed => isis format */
converted = hypre_sscanf(line, "%d %d", &num_rows, &dummy);
hypre_assert(num_rows == mat->end_rows[npes-1]);
/* Read own rows first */
for (i=0; i<num_local; i++)
if (converted == 1) /* isis format */
hypre_fscanf(file, "%*d %lf", &rhs[i]);
else
hypre_fscanf(file, "%lf", &rhs[i]);
/* then read and ship each other rank's slice, growing the staging buffer
only when a larger slice is encountered */
for (pe=1; pe<npes; pe++)
{
num_local = mat->end_rows[pe] - mat->beg_rows[pe]+ 1;
if (buflen < num_local)
{
free(buffer);
buflen = num_local;
buffer = hypre_TAlloc(HYPRE_Real, buflen , HYPRE_MEMORY_HOST);
}
for (i=0; i<num_local; i++)
if (converted == 1) /* isis format */
hypre_fscanf(file, "%*d %lf", &buffer[i]);
else
hypre_fscanf(file, "%lf", &buffer[i]);
hypre_MPI_Send(buffer, num_local, hypre_MPI_REAL, pe, 0, mat->comm);
}
free(buffer);
}
/*--------------------------------------------------------------------------
* SetupReceives - build the receive side of the communication pattern.
* reqind[0..reqlen-1] lists the (sorted-by-owner) external row indices this
* rank needs.  For each contiguous run owned by one processor, a persistent
* receive (tag 555, for matvec) and a persistent send (tag 666, for the
* transpose matvec) are created.  outlist[pe] is set to the number of
* indices requested from pe, and the index lists themselves are sent with
* tag 444 (received by SetupSends on the owning rank).
*--------------------------------------------------------------------------*/
static void SetupReceives(Matrix *mat, HYPRE_Int reqlen, HYPRE_Int *reqind, HYPRE_Int *outlist)
{
HYPRE_Int i, j, this_pe, mype;
hypre_MPI_Request request;
MPI_Comm comm = mat->comm;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
hypre_MPI_Comm_rank(comm, &mype);
mat->num_recv = 0;
/* Allocate recvbuf */
/* recvbuf has numlocal entires saved for local part of x, used in matvec */
mat->recvlen = reqlen; /* used for the transpose multiply */
mat->recvbuf = hypre_TAlloc(HYPRE_Real, (reqlen+num_local) , HYPRE_MEMORY_HOST);
for (i=0; i<reqlen; i=j) /* j is set below */
{
/* The processor that owns the row with index reqind[i] */
this_pe = MatrixRowPe(mat, reqind[i]);
/* Figure out other rows we need from this_pe */
for (j=i+1; j<reqlen; j++)
{
/* if row is on different pe */
if (reqind[j] < mat->beg_rows[this_pe] ||
reqind[j] > mat->end_rows[this_pe])
break;
}
/* Request rows in reqind[i..j-1] */
hypre_MPI_Isend(&reqind[i], j-i, HYPRE_MPI_INT, this_pe, 444, comm, &request);
hypre_MPI_Request_free(&request);
/* Count of number of number of indices needed from this_pe */
outlist[this_pe] = j-i;
/* persistent recv of remote x values into the tail of recvbuf (matvec),
and persistent send of the same slots for the transpose matvec */
hypre_MPI_Recv_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_REAL, this_pe, 555,
comm, &mat->recv_req[mat->num_recv]);
hypre_MPI_Send_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_REAL, this_pe, 666,
comm, &mat->send_req2[mat->num_recv]);
mat->num_recv++;
}
}
/*--------------------------------------------------------------------------
* SetupSends - build the send side of the communication pattern.
* inlist[pe] is the number of row indices that pe requested from this rank
* (computed by the Alltoall in MatrixComplete).  Receives those index lists
* (tag 444, matching the Isend in SetupReceives) and creates persistent
* sends (tag 555, matvec) and receives (tag 666, transpose matvec) over
* sendbuf.  Finally converts the received global indices to local ones.
* This function will wait for all receives to complete.
*--------------------------------------------------------------------------*/
static void SetupSends(Matrix *mat, HYPRE_Int *inlist)
{
HYPRE_Int i, j, mype, npes;
hypre_MPI_Request *requests;
hypre_MPI_Status *statuses;
MPI_Comm comm = mat->comm;
hypre_MPI_Comm_rank(comm, &mype);
hypre_MPI_Comm_size(comm, &npes);
requests = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
statuses = hypre_TAlloc(hypre_MPI_Status, npes , HYPRE_MEMORY_HOST);
/* Determine size of and allocate sendbuf and sendind */
mat->sendlen = 0;
for (i=0; i<npes; i++)
mat->sendlen += inlist[i];
mat->sendbuf = NULL;
mat->sendind = NULL;
if (mat->sendlen)
{
mat->sendbuf = hypre_TAlloc(HYPRE_Real, mat->sendlen , HYPRE_MEMORY_HOST);
mat->sendind = hypre_TAlloc(HYPRE_Int, mat->sendlen , HYPRE_MEMORY_HOST);
}
j = 0;
mat->num_send = 0;
for (i=0; i<npes; i++)
{
if (inlist[i] != 0)
{
/* Post receive for the actual indices */
hypre_MPI_Irecv(&mat->sendind[j], inlist[i], HYPRE_MPI_INT, i, 444, comm,
&requests[mat->num_send]);
/* Set up the send */
hypre_MPI_Send_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 555, comm,
&mat->send_req[mat->num_send]);
/* Set up the receive for the transpose */
hypre_MPI_Recv_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 666, comm,
&mat->recv_req2[mat->num_send]);
mat->num_send++;
j += inlist[i];
}
}
hypre_MPI_Waitall(mat->num_send, requests, statuses);
free(requests);
free(statuses);
/* convert global indices to local indices */
/* these are all indices on this processor */
for (i=0; i<mat->sendlen; i++)
mat->sendind[i] -= mat->beg_row;
}
/*--------------------------------------------------------------------------
* MatrixComplete - finalize a matrix after all rows have been set.
* Collective.  Builds the Numbering object, sets up the persistent
* send/receive machinery (SetupReceives/SetupSends, with the counts
* exchanged via Alltoall), and converts all stored column indices from
* global to local numbering.  Must be called before MatrixMatvec.
*--------------------------------------------------------------------------*/
void MatrixComplete(Matrix *mat)
{
HYPRE_Int mype, npes;
HYPRE_Int *outlist, *inlist;
HYPRE_Int row, len, *ind;
HYPRE_Real *val;
hypre_MPI_Comm_rank(mat->comm, &mype);
hypre_MPI_Comm_size(mat->comm, &npes);
/* request arrays sized for the worst case of one peer per rank */
mat->recv_req = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->send_req = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->recv_req2 = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->send_req2 = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
mat->statuses = hypre_TAlloc(hypre_MPI_Status, npes , HYPRE_MEMORY_HOST);
outlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);
inlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);
/* Create Numbering object */
mat->numb = NumberingCreate(mat, PARASAILS_NROWS);
/* the external indices are those past the local ones in the numbering */
SetupReceives(mat, mat->numb->num_ind - mat->numb->num_loc,
&mat->numb->local_to_global[mat->numb->num_loc], outlist);
hypre_MPI_Alltoall(outlist, 1, HYPRE_MPI_INT, inlist, 1, HYPRE_MPI_INT, mat->comm);
SetupSends(mat, inlist);
free(outlist);
free(inlist);
/* Convert to local indices */
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
NumberingGlobalToLocal(mat->numb, len, ind, ind);
}
}
/*--------------------------------------------------------------------------
* MatrixMatvec - y = mat * x.
* Can be done in place.
* Starts the persistent sends/receives, copies the local part of x into
* the head of recvbuf (remote values land in the tail), then multiplies
* each row against recvbuf.  Row loop is OpenMP-parallel when enabled.
*--------------------------------------------------------------------------*/
void MatrixMatvec(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val, temp;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
/* Set up persistent communications */
/* Assumes MatrixComplete has been called */
/* Put components of x into the right outgoing buffers */
for (i=0; i<mat->sendlen; i++)
mat->sendbuf[i] = x[mat->sendind[i]];
hypre_MPI_Startall(mat->num_recv, mat->recv_req);
hypre_MPI_Startall(mat->num_send, mat->send_req);
/* Copy local part of x into top part of recvbuf */
for (i=0; i<num_local; i++)
mat->recvbuf[i] = x[i];
/* remote values must have arrived before the multiply */
hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);
/* do the multiply */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(row,len,ind,val,temp,i) schedule(static)
#endif
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
temp = 0.0;
for (i=0; i<len; i++)
{
temp = temp + val[i] * mat->recvbuf[ind[i]];
}
y[row] = temp;
}
/* sendbuf must not be reused until the sends finish */
hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}
/* Serial (unthreaded) variant of MatrixMatvec: y := A*x.
 * Same persistent-request communication pattern; only the row loop
 * differs in that it carries no OpenMP pragma. */
void MatrixMatvecSerial(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
    HYPRE_Int r, j, rowlen, *cols;
    HYPRE_Real *vals, sum;
    HYPRE_Int nlocal = mat->end_row - mat->beg_row + 1;

    /* Stage outgoing components of x, then kick off the persistent
       receives and sends (assumes MatrixComplete has been called). */
    for (j = 0; j < mat->sendlen; j++)
        mat->sendbuf[j] = x[mat->sendind[j]];
    hypre_MPI_Startall(mat->num_recv, mat->recv_req);
    hypre_MPI_Startall(mat->num_send, mat->send_req);

    /* The local part of x occupies the head of recvbuf. */
    for (j = 0; j < nlocal; j++)
        mat->recvbuf[j] = x[j];

    /* Remote values of x arrive in the tail of recvbuf. */
    hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);

    /* Sparse dot product per row; column indices are already local. */
    for (r = 0; r < nlocal; r++)
    {
        MatrixGetRow(mat, r, &rowlen, &cols, &vals);
        sum = 0.0;
        for (j = 0; j < rowlen; j++)
            sum += vals[j] * mat->recvbuf[cols[j]];
        y[r] = sum;
    }

    /* Complete the sends before sendbuf can be reused. */
    hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}
/*--------------------------------------------------------------------------
* MatrixMatvecTrans
* Can be done in place.
*--------------------------------------------------------------------------*/
/* y := A^T * x.  The communication pattern of MatrixMatvec runs in
 * reverse: partial sums destined for rows owned by other processes are
 * accumulated in the tail of recvbuf and shipped back over the *_req2
 * request sets.  Because the roles are reversed, num_send is paired with
 * recv_req2 and num_recv with send_req2 below -- the apparent swap is
 * intentional, not a typo. */
void MatrixMatvecTrans(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
HYPRE_Int row, i, len, *ind;
HYPRE_Real *val;
HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;
/* Set up persistent communications */
/* Assumes MatrixComplete has been called */
/* Post receives for local parts of the solution y */
/* (received partial sums arrive in sendbuf -- see the accumulation into
   y[mat->sendind[i]] below) */
hypre_MPI_Startall(mat->num_send, mat->recv_req2);
/* initialize accumulator buffer to zero */
for (i=0; i<mat->recvlen+num_local; i++)
mat->recvbuf[i] = 0.0;
/* do the multiply: scatter val[i]*x[row] into column positions; entries
   past num_local are contributions to rows owned by other processes */
for (row=0; row<=mat->end_row - mat->beg_row; row++)
{
MatrixGetRow(mat, row, &len, &ind, &val);
for (i=0; i<len; i++)
{
mat->recvbuf[ind[i]] += val[i] * x[row];
}
}
/* Now can send nonlocal parts of solution to other procs */
hypre_MPI_Startall(mat->num_recv, mat->send_req2);
/* copy local part of solution into y */
for (i=0; i<num_local; i++)
y[i] = mat->recvbuf[i];
/* alternatively, loop over a wait any */
hypre_MPI_Waitall(mat->num_send, mat->recv_req2, mat->statuses);
/* add all the incoming partial sums to y */
for (i=0; i<mat->sendlen; i++)
y[mat->sendind[i]] += mat->sendbuf[i];
hypre_MPI_Waitall(mat->num_recv, mat->send_req2, mat->statuses);
}
|
omp-spawn-n-tasks-in-explicit-task-with-yield.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
/* Micro-benchmark: one explicit task spawns nTasks child tasks via a
 * grainsize-1 taskloop; each child sleeps ~1ms and (optionally) yields so
 * the runtime's taskyield behavior can be observed. */
int main(int argc, char *argv[])
{
    /* Accept exactly one or two arguments: task count, optional yield flag. */
    if (argc == 1 || argc >= 4)
    {
        fprintf(stderr, "Usage: %s nTasks [yield (default:1)]\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    int nTasks = atoi(argv[1]);
    bool yield = (argc == 3) ? (atoi(argv[2]) != 0) : true;

    #pragma omp parallel
    {
        /* One thread creates the generator task; the rest are free to
           execute the children it spawns. */
        #pragma omp single nowait
        #pragma omp task
        {
            #pragma omp taskloop grainsize(1)
            for (int i = 0; i < nTasks; i++)
            {
                usleep(1000);
                if (yield)
                {
                    #pragma omp taskyield
                }
            }
        }
        #pragma omp taskwait
    }
    return EXIT_SUCCESS;
}
lulesh.c | /*
This is a Version 2.0 MPI + Open{ACC,MP} Beta implementation of LULESH
Copyright (c) 2010-2013.
Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory.
LLNL-CODE-461231
All rights reserved.
This file is part of LULESH, Version 2.0.
Please also read this link -- http://www.opensource.org/licenses/index.php
//////////////
DIFFERENCES BETWEEN THIS VERSION (2.x) AND EARLIER VERSIONS:
* Addition of regions to make work more representative of multi-material codes
* Default size of each domain is 30^3 (27000 elem) instead of 45^3. This is
more representative of our actual working set sizes
* Single source distribution supports pure serial, pure OpenMP, MPI-only,
and MPI+OpenMP
* Addition of ability to visualize the mesh using VisIt
https://wci.llnl.gov/codes/visit/download.html
* Various command line options (see ./lulesh2.0 -h)
-q : quiet mode - suppress stdout
-i <iterations> : number of cycles to run
-s <size> : length of cube mesh along side
-r <numregions> : Number of distinct regions (def: 11)
-b <balance> : Load balance between regions of a domain (def: 1)
-c <cost> : Extra cost of more expensive regions (def: 1)
-f <filepieces> : Number of file parts for viz output (def: np/9)
-p : Print out progress
-v : Output viz file (requires compiling with -DVIZ_MESH
-h : This message
printf("Usage: %s [opts]\n", execname);
printf(" where [opts] is one or more of:\n");
printf(" -q : quiet mode - suppress all stdout\n");
printf(" -i <iterations> : number of cycles to run\n");
printf(" -s <size> : length of cube mesh along side\n");
printf(" -r <numregions> : Number of distinct regions (def: 11)\n");
printf(" -b <balance> : Load balance between regions of a domain (def: 1)\n");
printf(" -c <cost> : Extra cost of more expensive regions (def: 1)\n");
printf(" -f <numfiles> : Number of files to split viz dump into (def: (np+10)/9)\n");
printf(" -p : Print out progress\n");
printf(" -v : Output viz file (requires compiling with -DVIZ_MESH\n");
printf(" -h : This message\n");
printf("\n\n");
*Notable changes in LULESH 2.0
* Split functionality into different files
lulesh.cc - where most (all?) of the timed functionality lies
lulesh-comm.cc - MPI functionality
lulesh-init.cc - Setup code
lulesh-util.cc - Non-timed functions
*
* The concept of "regions" was added, although every region is the same ideal gas material, and the same sedov blast wave problem is still the only problem its hardcoded to solve. Regions allow two things important to making this proxy app more representative:
* Four of the LULESH routines are now performed on a region-by-region basis, making the memory access patterns non-unit stride
* Artificial load imbalances can be easily introduced that could impact parallelization strategies.
* The load balance flag changes region assignment. Region number is raised to the power entered for assignment probability. Most likely regions changes with MPI process id.
* The cost flag raises the cost of ~45% of the regions to evaluate EOS by the entered multiple. The cost of 5% is 10x the entered multiple.
* MPI and OpenMP were added, and coalesced into a single version of the source that can support serial builds, MPI-only, OpenMP-only, and MPI+OpenMP
* Added support to write plot files using "poor mans parallel I/O" when linked with the silo library, which in turn can be read by VisIt.
* Enabled variable timestep calculation by default (courant condition), which results in an additional reduction.
* Default domain (mesh) size reduced from 45^3 to 30^3
* Command line options to allow for numerous test cases without needing to recompile
* Performance optimizations and code cleanup uncovered during study of LULESH 1.0
* Added a "Figure of Merit" calculation (elements solved per microsecond) and output in support of using LULESH 2.0 for the 2017 CORAL procurement
*
* Possible Differences in Final Release (other changes possible)
*
* High Level mesh structure to allow data structure transformations
* Different default parameters
* Minor code performance changes and cleanup
TODO in future versions
* Add reader for (truly) unstructured meshes, probably serial only
* CMake based build system
//////////////
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional BSD Notice
1. This notice is required to be provided under our contract with the U.S.
Department of Energy (DOE). This work was produced at Lawrence Livermore
National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
2. Neither the United States Government nor Lawrence Livermore National
Security, LLC nor any of their employees, makes any warranty, express
or implied, or assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights.
3. Also, reference herein to any specific commercial products, process, or
services by trade name, trademark, manufacturer or otherwise does not
necessarily constitute or imply its endorsement, recommendation, or
favoring by the United States Government or Lawrence Livermore National
Security, LLC. The views and opinions of authors expressed herein do not
necessarily state or reflect those of the United States Government or
Lawrence Livermore National Security, LLC, and shall not be used for
advertising or product endorsement purposes.
*/
//#include <math.h>
#include <stdio.h>
#include <stdlib.h>
//#include <string.h>
//#include <ctype.h>
#include <time.h>
#if !defined(_OPENACC) && defined(_OPENMP)
# include <omp.h>
#endif
#include "lulesh.h"
#if !defined(LULESH_DUMP_OUTPUT)
#define LULESH_DUMP_OUTPUT 0
#endif
/*********************************/
/* Data structure implementation */
/*********************************/
/* These structs are used to turn local, constant-sized arrays into scalars
inside of accelerated regions. */
/* Eight Real_t values held as named scalars: stands in for a local
   Real_t[8] so accelerated regions keep the values in registers instead
   of a shared array context (see file note above). */
typedef struct
{
    Real_t v0, v1, v2, v3, v4, v5, v6, v7;
} val8;
/* Six Real_t values held as named scalars (same register-friendly trick
   as val8, for length-6 local arrays). */
typedef struct
{
    Real_t v0, v1, v2, v3, v4, v5;
} val6;
/* Scalarized 3x8 matrix (rows v0_/v1_/v2_ by columns 0..7), used for the
   per-node shape-function derivative / normal data of a hex element. */
typedef struct
{
    Real_t v0_0, v0_1, v0_2, v0_3, v0_4, v0_5, v0_6, v0_7;
    Real_t v1_0, v1_1, v1_2, v1_3, v1_4, v1_5, v1_6, v1_7;
    Real_t v2_0, v2_1, v2_2, v2_3, v2_4, v2_5, v2_6, v2_7;
} bmat;
/* Scalarized 8x4 matrix (rows v0_..v7_ by columns 0..3) for manual loop
   unrolling in the hourglass-force kernels. */
typedef struct
{
    Real_t v0_0, v0_1, v0_2, v0_3;
    Real_t v1_0, v1_1, v1_2, v1_3;
    Real_t v2_0, v2_1, v2_2, v2_3;
    Real_t v3_0, v3_1, v3_2, v3_3;
    Real_t v4_0, v4_1, v4_2, v4_3;
    Real_t v5_0, v5_1, v5_2, v5_3;
    Real_t v6_0, v6_1, v6_2, v6_3;
    Real_t v7_0, v7_1, v7_2, v7_3;
} hourmat;
/* LULESH domain state.  The original C++ Domain class is flattened into
   file-scope globals (m_*) so OpenACC data clauses can reference the raw
   arrays directly.  Node-centered quantities come first, then
   element-centered quantities, then scalar cutoffs/constants and
   timestep bookkeeping. */
#if USE_MPI
// Communication Work space
Real_t *commDataSend ;
Real_t *commDataRecv ;
// Maximum number of block neighbors
MPI_Request recvRequest[26] ; // 6 faces + 12 edges + 8 corners
MPI_Request sendRequest[26] ; // 6 faces + 12 edges + 8 corners
#endif
int m_numDevs;
/* Node-centered */
Real_t* m_x ; /* coordinates */
Real_t* m_y ;
Real_t* m_z ;
Real_t* m_xd ; /* velocities */
Real_t* m_yd ;
Real_t* m_zd ;
Real_t* m_xdd ; /* accelerations */
Real_t* m_ydd ;
Real_t* m_zdd ;
Real_t* m_fx ; /* forces */
Real_t* m_fy ;
Real_t* m_fz ;
/* tmp arrays that are allocated globally for OpenACC */
Real_t* m_fx_elem ;
Real_t* m_fy_elem ;
Real_t* m_fz_elem ;
Real_t* m_dvdx ;
Real_t* m_dvdy ;
Real_t* m_dvdz ;
Real_t* m_x8n ;
Real_t* m_y8n ;
Real_t* m_z8n ;
Real_t* m_sigxx ;
Real_t* m_sigyy ;
Real_t* m_sigzz ;
Real_t* m_determ ;
Real_t* m_e_old ;
Real_t* m_delvc ;
Real_t* m_p_old ;
Real_t* m_q_old ;
Real_t* m_compression ;
Real_t* m_compHalfStep ;
Real_t* m_qq_old ;
Real_t* m_ql_old ;
Real_t* m_work ;
Real_t* m_p_new ;
Real_t* m_e_new ;
Real_t* m_q_new ;
Real_t* m_bvc ;
Real_t* m_pbvc ;
Real_t* m_nodalMass ; /* mass */
Index_t* m_symmX; /* symmetry plane nodesets */
Index_t* m_symmY;
Index_t* m_symmZ;
bool m_symmXempty;
bool m_symmYempty;
bool m_symmZempty;
// Element-centered
// Region information
Int_t m_numReg ;
Int_t m_cost; //imbalance cost
Int_t *m_regElemSize ; // Size of region sets
Index_t *m_regNumList ; // Region number per domain element
Index_t **m_regElemlist ; // region indexset
Index_t* m_matElemlist ; /* material indexset */
Index_t* m_nodelist ; /* elemToNode connectivity */
Index_t* m_lxim ; /* element connectivity across each face */
Index_t* m_lxip ;
Index_t* m_letam ;
Index_t* m_letap ;
Index_t* m_lzetam ;
Index_t* m_lzetap ;
Int_t* m_elemBC ; /* symmetry/free-surface flags for each elem face */
Real_t* m_dxx ; /* principal strains -- temporary */
Real_t* m_dyy ;
Real_t* m_dzz ;
Real_t* m_delv_xi ; /* velocity gradient -- temporary */
Real_t* m_delv_eta ;
Real_t* m_delv_zeta ;
Real_t* m_delx_xi ; /* coordinate gradient -- temporary */
Real_t* m_delx_eta ;
Real_t* m_delx_zeta ;
Real_t* m_e ; /* energy */
Real_t* m_p ; /* pressure */
Real_t* m_q ; /* q */
Real_t* m_ql ; /* linear term for q */
Real_t* m_qq ; /* quadratic term for q */
Real_t* m_v ; /* relative volume */
Real_t* m_volo ; /* reference volume */
Real_t* m_vnew ; /* new relative volume -- temporary */
Real_t* m_delv ; /* m_vnew - m_v */
Real_t* m_vdov ; /* volume derivative over volume */
Real_t* m_arealg ; /* characteristic length of an element */
Real_t* m_ss ; /* "sound speed" */
Real_t* m_elemMass ; /* mass */
// Cutoffs (treat as constants)
Real_t m_e_cut ; // energy tolerance
Real_t m_p_cut ; // pressure tolerance
Real_t m_q_cut ; // q tolerance
Real_t m_v_cut ; // relative volume tolerance
Real_t m_u_cut ; // velocity tolerance
// Other constants (usually setable, but hardcoded in this proxy app)
Real_t m_hgcoef ; // hourglass control
Real_t m_ss4o3 ;
Real_t m_qstop ; // excessive q indicator
Real_t m_monoq_max_slope ;
Real_t m_monoq_limiter_mult ;
Real_t m_qlc_monoq ; // linear term coef for q
Real_t m_qqc_monoq ; // quadratic term coef for q
Real_t m_qqc ;
Real_t m_eosvmax ;
Real_t m_eosvmin ;
Real_t m_pmin ; // pressure floor
Real_t m_emin ; // energy floor
Real_t m_dvovmax ; // maximum allowable volume change
Real_t m_refdens ; // reference density
// Variables to keep track of timestep, simulation time, and cycle
Real_t m_dtcourant ; // courant constraint
Real_t m_dthydro ; // volume change constraint
Int_t m_cycle ; // iteration count for simulation
Real_t m_dtfixed ; // fixed time increment
Real_t m_time ; // current time
Real_t m_deltatime ; // variable time increment
Real_t m_deltatimemultlb ;
Real_t m_deltatimemultub ;
Real_t m_dtmax ; // maximum allowable time increment
Real_t m_stoptime ; // end time for simulation
Int_t m_numRanks ;
Index_t m_colLoc ;
Index_t m_rowLoc ;
Index_t m_planeLoc ;
Index_t m_tp ;
Index_t m_sizeX ;
Index_t m_sizeY ;
Index_t m_sizeZ ;
Index_t m_numElem ;
Index_t m_numNode ;
Index_t m_maxPlaneSize ;
Index_t m_maxEdgeSize ;
// OMP hack
// (node -> element-corner gather map so elemental forces can be summed
// into nodal arrays without write conflicts; see IntegrateStressForElems)
Index_t *m_nodeElemCount ;
Index_t *m_nodeElemStart ;
Index_t *m_nodeElemCornerList ;
// Used in setup
Index_t m_rowMin, m_rowMax;
Index_t m_colMin, m_colMax;
Index_t m_planeMin, m_planeMax ;
/******************************************/
/* Work Routines */
static inline
void TimeIncrement()
{
Real_t targetdt = m_stoptime - m_time ;
if ((m_dtfixed <= (Real_t)(0.0)) && (m_cycle != (Int_t)(0))) {
Real_t ratio ;
Real_t olddt = m_deltatime ;
/* This will require a reduction in parallel */
Real_t gnewdt = (Real_t)(1.0e+20) ;
Real_t newdt ;
if (m_dtcourant < gnewdt) {
gnewdt = m_dtcourant / (Real_t)(2.0) ;
}
if (m_dthydro < gnewdt) {
gnewdt = m_dthydro * (Real_t)(2.0) / (Real_t)(3.0) ;
}
#if USE_MPI
MPI_Allreduce(&gnewdt, &newdt, 1, ((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE), MPI_MIN, MPI_COMM_WORLD) ;
#else
newdt = gnewdt;
#endif
ratio = newdt / olddt ;
if (ratio >= (Real_t)(1.0)) {
if (ratio < m_deltatimemultlb) {
newdt = olddt ;
}
else if (ratio > m_deltatimemultub) {
newdt = olddt*m_deltatimemultub ;
}
}
if (newdt > m_dtmax) {
newdt = m_dtmax ;
}
m_deltatime = newdt ;
}
/* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
if ((targetdt > m_deltatime) &&
(targetdt < ((Real_t)(4.0) * m_deltatime / (Real_t)(3.0))) ) {
targetdt = (Real_t)(2.0) * m_deltatime / (Real_t)(3.0) ;
}
if (targetdt < m_deltatime) {
m_deltatime = targetdt ;
}
m_time += m_deltatime ;
++m_cycle ;
}
/******************************************/
/* Seed the stress arrays for the hydro integration: each element's
 * diagonal stress is the isotropic term -(p + q), identical in all three
 * directions. */
static inline
void InitStressTermsForElems(Real_t *p, Real_t *q,
                             Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
                             Index_t numElem)
{
   Index_t k;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(p[numElem], q[numElem], \
    sigxx[numElem],sigyy[numElem],sigzz[numElem]) async(0)
#else
#pragma acc parallel loop present(p[numElem], q[numElem], \
    sigxx[numElem],sigyy[numElem],sigzz[numElem])
#endif
#else
#pragma omp parallel for firstprivate(numElem)
#endif
   for (k = 0 ; k < numElem ; ++k) {
      const Real_t s = - p[k] - q[k];
      sigxx[k] = s;
      sigyy[k] = s;
      sigzz[k] = s;
   }
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
/* Shape-function derivatives (partials) and jacobian determinant (volume)
   for one hex element.  x,y,z are val8-style structs of nodal coordinates,
   b is a bmat-style 3x8 output, volume receives 8*det(J).  Fully unrolled
   for accelerator-friendly scalar code.
   FIX: the original macro ended with "} while(0)\" -- the trailing
   backslash spliced the NEXT source line into the macro definition
   (harmless while that line was a comment, but a silent trap).  The stray
   backslash has been removed. */
#define CalcElemShapeFunctionDerivatives_unrolled(x,y,z,b,volume) \
do {\
Real_t fjxxi, fjxet, fjxze;\
Real_t fjyxi, fjyet, fjyze;\
Real_t fjzxi, fjzet, fjzze;\
Real_t cjxxi, cjxet, cjxze;\
Real_t cjyxi, cjyet, cjyze;\
Real_t cjzxi, cjzet, cjzze;\
\
fjxxi = (Real_t)(.125) * ( (x.v6-x.v0) + (x.v5-x.v3) - (x.v7-x.v1) - (x.v4-x.v2) );\
fjxet = (Real_t)(.125) * ( (x.v6-x.v0) - (x.v5-x.v3) + (x.v7-x.v1) - (x.v4-x.v2) );\
fjxze = (Real_t)(.125) * ( (x.v6-x.v0) + (x.v5-x.v3) + (x.v7-x.v1) + (x.v4-x.v2) );\
\
fjyxi = (Real_t)(.125) * ( (y.v6-y.v0) + (y.v5-y.v3) - (y.v7-y.v1) - (y.v4-y.v2) );\
fjyet = (Real_t)(.125) * ( (y.v6-y.v0) - (y.v5-y.v3) + (y.v7-y.v1) - (y.v4-y.v2) );\
fjyze = (Real_t)(.125) * ( (y.v6-y.v0) + (y.v5-y.v3) + (y.v7-y.v1) + (y.v4-y.v2) );\
\
fjzxi = (Real_t)(.125) * ( (z.v6-z.v0) + (z.v5-z.v3) - (z.v7-z.v1) - (z.v4-z.v2) );\
fjzet = (Real_t)(.125) * ( (z.v6-z.v0) - (z.v5-z.v3) + (z.v7-z.v1) - (z.v4-z.v2) );\
fjzze = (Real_t)(.125) * ( (z.v6-z.v0) + (z.v5-z.v3) + (z.v7-z.v1) + (z.v4-z.v2) );\
\
/* compute cofactors */\
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);\
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);\
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);\
\
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);\
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);\
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);\
\
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);\
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);\
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);\
\
/* calculate partials :\
this need only be done for l = 0,1,2,3 since , by symmetry ,\
(6,7,4,5) = - (0,1,2,3) .\
*/\
(b.v0_0) = - cjxxi - cjxet - cjxze;\
(b.v0_1) = cjxxi - cjxet - cjxze;\
(b.v0_2) = cjxxi + cjxet - cjxze;\
(b.v0_3) = - cjxxi + cjxet - cjxze;\
(b.v0_4) = -(b.v0_2);\
(b.v0_5) = -(b.v0_3);\
(b.v0_6) = -(b.v0_0);\
(b.v0_7) = -(b.v0_1);\
\
(b.v1_0) = - cjyxi - cjyet - cjyze;\
(b.v1_1) = cjyxi - cjyet - cjyze;\
(b.v1_2) = cjyxi + cjyet - cjyze;\
(b.v1_3) = - cjyxi + cjyet - cjyze;\
(b.v1_4) = -(b.v1_2);\
(b.v1_5) = -(b.v1_3);\
(b.v1_6) = -(b.v1_0);\
(b.v1_7) = -(b.v1_1);\
\
(b.v2_0) = - cjzxi - cjzet - cjzze;\
(b.v2_1) = cjzxi - cjzet - cjzze;\
(b.v2_2) = cjzxi + cjzet - cjzze;\
(b.v2_3) = - cjzxi + cjzet - cjzze;\
(b.v2_4) = -(b.v2_2);\
(b.v2_5) = -(b.v2_3);\
(b.v2_6) = -(b.v2_0);\
(b.v2_7) = -(b.v2_1);\
\
/* calculate jacobian determinant (volume) */\
(volume) = (Real_t)(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);\
} while(0)
/******************************************/
/* Accumulate one quad face's area-weighted normal into the normals of its
   four corner nodes: the face normal is the scaled cross product of the
   quad's two bisection diagonals, and a quarter of it is added to each
   corner. */
#define SumElemFaceNormal(normalX0, normalY0, normalZ0,\
                          normalX1, normalY1, normalZ1,\
                          normalX2, normalY2, normalZ2,\
                          normalX3, normalY3, normalZ3,\
                          x0, y0, z0,\
                          x1, y1, z1,\
                          x2, y2, z2,\
                          x3, y3, z3)\
do {\
   Real_t b0x = (Real_t)(0.5) * ((x3) + (x2) - (x1) - (x0));\
   Real_t b0y = (Real_t)(0.5) * ((y3) + (y2) - (y1) - (y0));\
   Real_t b0z = (Real_t)(0.5) * ((z3) + (z2) - (z1) - (z0));\
   Real_t b1x = (Real_t)(0.5) * ((x2) + (x1) - (x3) - (x0));\
   Real_t b1y = (Real_t)(0.5) * ((y2) + (y1) - (y3) - (y0));\
   Real_t b1z = (Real_t)(0.5) * ((z2) + (z1) - (z3) - (z0));\
   Real_t ax = (Real_t)(0.25) * (b0y * b1z - b0z * b1y);\
   Real_t ay = (Real_t)(0.25) * (b0z * b1x - b0x * b1z);\
   Real_t az = (Real_t)(0.25) * (b0x * b1y - b0y * b1x);\
\
   (normalX0) += ax;\
   (normalX1) += ax;\
   (normalX2) += ax;\
   (normalX3) += ax;\
\
   (normalY0) += ay;\
   (normalY1) += ay;\
   (normalY2) += ay;\
   (normalY3) += ay;\
\
   (normalZ0) += az;\
   (normalZ1) += az;\
   (normalZ2) += az;\
   (normalZ3) += az;\
} while(0)
/******************************************/
/* Zero the 3x8 node-normal matrix B, then accumulate the area-weighted
   normals of all six faces of the hex element: each face contributes a
   quarter of its normal to each of its four corner nodes via
   SumElemFaceNormal.  Fully unrolled for accelerator-friendly scalars. */
#define CalcElemNodeNormals_unrolled(B,x,y,z)\
do {\
(B.v0_0) = (Real_t)(0.0);\
(B.v1_0) = (Real_t)(0.0);\
(B.v2_0) = (Real_t)(0.0);\
(B.v0_1) = (Real_t)(0.0);\
(B.v1_1) = (Real_t)(0.0);\
(B.v2_1) = (Real_t)(0.0);\
(B.v0_2) = (Real_t)(0.0);\
(B.v1_2) = (Real_t)(0.0);\
(B.v2_2) = (Real_t)(0.0);\
(B.v0_3) = (Real_t)(0.0);\
(B.v1_3) = (Real_t)(0.0);\
(B.v2_3) = (Real_t)(0.0);\
(B.v0_4) = (Real_t)(0.0);\
(B.v1_4) = (Real_t)(0.0);\
(B.v2_4) = (Real_t)(0.0);\
(B.v0_5) = (Real_t)(0.0);\
(B.v1_5) = (Real_t)(0.0);\
(B.v2_5) = (Real_t)(0.0);\
(B.v0_6) = (Real_t)(0.0);\
(B.v1_6) = (Real_t)(0.0);\
(B.v2_6) = (Real_t)(0.0);\
(B.v0_7) = (Real_t)(0.0);\
(B.v1_7) = (Real_t)(0.0);\
(B.v2_7) = (Real_t)(0.0);\
/* evaluate face one: nodes 0, 1, 2, 3 */\
SumElemFaceNormal((B.v0_0), (B.v1_0), (B.v2_0),\
(B.v0_1), (B.v1_1), (B.v2_1),\
(B.v0_2), (B.v1_2), (B.v2_2),\
(B.v0_3), (B.v1_3), (B.v2_3),\
(x.v0), (y.v0), (z.v0), (x.v1), (y.v1), (z.v1),\
(x.v2), (y.v2), (z.v2), (x.v3), (y.v3), (z.v3));\
/* evaluate face two: nodes 0, 4, 5, 1 */\
SumElemFaceNormal((B.v0_0), (B.v1_0), (B.v2_0),\
(B.v0_4), (B.v1_4), (B.v2_4),\
(B.v0_5), (B.v1_5), (B.v2_5),\
(B.v0_1), (B.v1_1), (B.v2_1),\
(x.v0), (y.v0), (z.v0), (x.v4), (y.v4), (z.v4),\
(x.v5), (y.v5), (z.v5), (x.v1), (y.v1), (z.v1));\
/* evaluate face three: nodes 1, 5, 6, 2 */\
SumElemFaceNormal((B.v0_1), (B.v1_1), (B.v2_1),\
(B.v0_5), (B.v1_5), (B.v2_5),\
(B.v0_6), (B.v1_6), (B.v2_6),\
(B.v0_2), (B.v1_2), (B.v2_2),\
(x.v1), (y.v1), (z.v1), (x.v5), (y.v5), (z.v5),\
(x.v6), (y.v6), (z.v6), (x.v2), (y.v2), (z.v2));\
/* evaluate face four: nodes 2, 6, 7, 3 */\
SumElemFaceNormal((B.v0_2), (B.v1_2), (B.v2_2),\
(B.v0_6), (B.v1_6), (B.v2_6),\
(B.v0_7), (B.v1_7), (B.v2_7),\
(B.v0_3), (B.v1_3), (B.v2_3),\
(x.v2), (y.v2), (z.v2), (x.v6), (y.v6), (z.v6),\
(x.v7), (y.v7), (z.v7), (x.v3), (y.v3), (z.v3));\
/* evaluate face five: nodes 3, 7, 4, 0 */\
SumElemFaceNormal((B.v0_3), (B.v1_3), (B.v2_3),\
(B.v0_7), (B.v1_7), (B.v2_7),\
(B.v0_4), (B.v1_4), (B.v2_4),\
(B.v0_0), (B.v1_0), (B.v2_0),\
(x.v3), (y.v3), (z.v3), (x.v7), (y.v7), (z.v7),\
(x.v4), (y.v4), (z.v4), (x.v0), (y.v0), (z.v0));\
/* evaluate face six: nodes 4, 7, 6, 5 */\
SumElemFaceNormal((B.v0_4), (B.v1_4), (B.v2_4),\
(B.v0_7), (B.v1_7), (B.v2_7),\
(B.v0_6), (B.v1_6), (B.v2_6),\
(B.v0_5), (B.v1_5), (B.v2_5),\
(x.v4), (y.v4), (z.v4), (x.v7), (y.v7), (z.v7),\
(x.v6), (y.v6), (z.v6), (x.v5), (y.v5), (z.v5));\
} while(0)
/******************************************/
/* Stress-force integration, in two phases:
 * 1) per-element: gather nodal coordinates, compute shape-function
 *    derivatives (and element volume into determ[]) and node normals,
 *    then write each element's 8 corner force contributions into the
 *    private fx_elem/fy_elem/fz_elem scratch arrays (no write conflicts).
 * 2) per-node: sum each node's corner contributions (located via the
 *    nodeElemStart/Count/CornerList gather map) into fx/fy/fz. */
static inline
void IntegrateStressForElems( Index_t *nodelist,
Real_t *x, Real_t *y, Real_t *z,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
Index_t *nodeElemCount,
Index_t *nodeElemStart,
Index_t *nodeElemCornerList,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
Real_t *determ, Index_t numElem, Index_t numNode)
{
volatile Index_t numElem8 = numElem * 8 ;
Index_t k;
// loop over all elements
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(x[numNode], \
y[numNode], \
z[numNode], \
determ[numElem], \
nodelist[numElem8], \
sigxx[numElem], \
sigyy[numElem], \
sigzz[numElem], \
fx_elem[numElem8], \
fy_elem[numElem8], \
fz_elem[numElem8]) async(0)
#else
#pragma acc parallel loop present(x[numNode], \
y[numNode], \
z[numNode], \
determ[numElem], \
nodelist[numElem8], \
sigxx[numElem], \
sigyy[numElem], \
sigzz[numElem], \
fx_elem[numElem8], \
fy_elem[numElem8], \
fz_elem[numElem8])
#endif
#else
#pragma omp parallel for firstprivate(numElem)
#endif
for(k = 0; k < numElem; ++k )
{
const Index_t *elemToNode = &(nodelist[8*k]);
bmat B; // shape function derivatives
val8 x_local;
val8 y_local;
val8 z_local;
Index_t gnode;
// get nodal coordinates from global arrays and copy into local arrays.
// Loop unrolled because the PGI OpenACC implementation currently stores
// locally-defined arrays in a global, shared context. Thus we have to use
// scalars instead to get them in registers.
gnode = elemToNode[0];
x_local.v0 = x[gnode];
y_local.v0 = y[gnode];
z_local.v0 = z[gnode];
gnode = elemToNode[1];
x_local.v1 = x[gnode];
y_local.v1 = y[gnode];
z_local.v1 = z[gnode];
gnode = elemToNode[2];
x_local.v2 = x[gnode];
y_local.v2 = y[gnode];
z_local.v2 = z[gnode];
gnode = elemToNode[3];
x_local.v3 = x[gnode];
y_local.v3 = y[gnode];
z_local.v3 = z[gnode];
gnode = elemToNode[4];
x_local.v4 = x[gnode];
y_local.v4 = y[gnode];
z_local.v4 = z[gnode];
gnode = elemToNode[5];
x_local.v5 = x[gnode];
y_local.v5 = y[gnode];
z_local.v5 = z[gnode];
gnode = elemToNode[6];
x_local.v6 = x[gnode];
y_local.v6 = y[gnode];
z_local.v6 = z[gnode];
gnode = elemToNode[7];
x_local.v7 = x[gnode];
y_local.v7 = y[gnode];
z_local.v7 = z[gnode];
// Volume calculation involves extra work for numerical consistency
CalcElemShapeFunctionDerivatives_unrolled(x_local, y_local, z_local, B, determ[k]);
CalcElemNodeNormals_unrolled( B, x_local, y_local, z_local );
// Eliminate thread writing conflicts at the nodes by giving
// each element its own copy to write to
// NOTE: This is a manually inlined macro. Moving it back into macro form
// requires some more pointer arithmetic which causes the current
// PGI compiler to segfault during compilation (version 13.6-accel).
fx_elem[k*8 + 0] = -( sigxx[k] * B.v0_0 );
fy_elem[k*8 + 0] = -( sigyy[k] * B.v1_0 );
fz_elem[k*8 + 0] = -( sigzz[k] * B.v2_0 );
fx_elem[k*8 + 1] = -( sigxx[k] * B.v0_1 );
fy_elem[k*8 + 1] = -( sigyy[k] * B.v1_1 );
fz_elem[k*8 + 1] = -( sigzz[k] * B.v2_1 );
fx_elem[k*8 + 2] = -( sigxx[k] * B.v0_2 );
fy_elem[k*8 + 2] = -( sigyy[k] * B.v1_2 );
fz_elem[k*8 + 2] = -( sigzz[k] * B.v2_2 );
fx_elem[k*8 + 3] = -( sigxx[k] * B.v0_3 );
fy_elem[k*8 + 3] = -( sigyy[k] * B.v1_3 );
fz_elem[k*8 + 3] = -( sigzz[k] * B.v2_3 );
fx_elem[k*8 + 4] = -( sigxx[k] * B.v0_4 );
fy_elem[k*8 + 4] = -( sigyy[k] * B.v1_4 );
fz_elem[k*8 + 4] = -( sigzz[k] * B.v2_4 );
fx_elem[k*8 + 5] = -( sigxx[k] * B.v0_5 );
fy_elem[k*8 + 5] = -( sigyy[k] * B.v1_5 );
fz_elem[k*8 + 5] = -( sigzz[k] * B.v2_5 );
fx_elem[k*8 + 6] = -( sigxx[k] * B.v0_6 );
fy_elem[k*8 + 6] = -( sigyy[k] * B.v1_6 );
fz_elem[k*8 + 6] = -( sigzz[k] * B.v2_6 );
fx_elem[k*8 + 7] = -( sigxx[k] * B.v0_7 );
fy_elem[k*8 + 7] = -( sigyy[k] * B.v1_7 );
fz_elem[k*8 + 7] = -( sigzz[k] * B.v2_7 );
}
// If threaded, then we need to copy the data out of the temporary
// arrays used above into the final forces field
/* volatile because otherwise it will be optimized out of the pragma and
break things. */
volatile Index_t nCorner = nodeElemStart[numNode-1]
+ nodeElemCount[numNode-1];
Index_t gnode;
/* NOTE(review): the present clauses below declare fx/fy/fz with extent
   numElem, but the loop writes fx[0..numNode) and these arrays are
   node-centered -- this looks like it should be numNode.  Harmless if the
   device copies were created with the full nodal extent; confirm against
   the allocation/data-region code before changing. */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc kernels loop independent vector(256) \
present(fx_elem[numElem8], \
fy_elem[numElem8], \
fz_elem[numElem8], \
fx[numElem], \
fy[numElem], \
fz[numElem], \
nodeElemCount[numNode], \
nodeElemCornerList[nCorner], \
nodeElemStart[numNode]) async(0)
#else
#pragma acc kernels loop independent vector(256) \
present(fx_elem[numElem8], \
fy_elem[numElem8], \
fz_elem[numElem8], \
fx[numElem], \
fy[numElem], \
fz[numElem], \
nodeElemCount[numNode], \
nodeElemCornerList[nCorner], \
nodeElemStart[numNode])
#endif
#else
#pragma omp parallel for firstprivate(numNode)
#endif
for( gnode=0 ; gnode<numNode ; ++gnode )
{
Index_t count = nodeElemCount[gnode] ;
Index_t start = nodeElemStart[gnode] ;
Real_t fx_tmp = (Real_t)(0.0) ;
Real_t fy_tmp = (Real_t)(0.0) ;
Real_t fz_tmp = (Real_t)(0.0) ;
Index_t i;
for (i=0 ; i < count ; ++i) {
Index_t elem = nodeElemCornerList[start+i] ;
fx_tmp += fx_elem[elem] ;
fy_tmp += fy_elem[elem] ;
fz_tmp += fz_elem[elem] ;
}
fx[gnode] = fx_tmp ;
fy[gnode] = fy_tmp ;
fz[gnode] = fz_tmp ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
/* Gather the 8 nodal x/y/z coordinates of one element (via its
   elemToNode connectivity) from the global node arrays into the val8
   scalar structs elemX/elemY/elemZ. */
#define CollectDomainNodesToElemNodes(x, y, z, \
elemToNode, \
elemX, elemY, elemZ) \
do { \
Index_t nd0i = (elemToNode)[0] ; \
Index_t nd1i = (elemToNode)[1] ; \
Index_t nd2i = (elemToNode)[2] ; \
Index_t nd3i = (elemToNode)[3] ; \
Index_t nd4i = (elemToNode)[4] ; \
Index_t nd5i = (elemToNode)[5] ; \
Index_t nd6i = (elemToNode)[6] ; \
Index_t nd7i = (elemToNode)[7] ; \
\
(elemX).v0 = (x)[nd0i]; \
(elemX).v1 = (x)[nd1i]; \
(elemX).v2 = (x)[nd2i]; \
(elemX).v3 = (x)[nd3i]; \
(elemX).v4 = (x)[nd4i]; \
(elemX).v5 = (x)[nd5i]; \
(elemX).v6 = (x)[nd6i]; \
(elemX).v7 = (x)[nd7i]; \
\
(elemY).v0 = (y)[nd0i]; \
(elemY).v1 = (y)[nd1i]; \
(elemY).v2 = (y)[nd2i]; \
(elemY).v3 = (y)[nd3i]; \
(elemY).v4 = (y)[nd4i]; \
(elemY).v5 = (y)[nd5i]; \
(elemY).v6 = (y)[nd6i]; \
(elemY).v7 = (y)[nd7i]; \
\
(elemZ).v0 = (z)[nd0i]; \
(elemZ).v1 = (z)[nd1i]; \
(elemZ).v2 = (z)[nd2i]; \
(elemZ).v3 = (z)[nd3i]; \
(elemZ).v4 = (z)[nd4i]; \
(elemZ).v5 = (z)[nd5i]; \
(elemZ).v6 = (z)[nd6i]; \
(elemZ).v7 = (z)[nd7i]; \
} while(0)
/******************************************/
/* Partial derivatives of the hex element volume with respect to one
   node's coordinates, written to dvdx/dvdy/dvdz.  The x0..x5, y0..y5,
   z0..z5 inputs are the coordinates of the six nodes relevant to that
   corner (the caller, CalcElemVolumeDerivative, selects them per node);
   the raw sums are scaled by 1/12. */
#define VoluDer(x0, x1, x2, \
x3, x4, x5, \
y0, y1, y2, \
y3, y4, y5, \
z0, z1, z2, \
z3, z4, z5, \
dvdx, dvdy, dvdz) \
do { \
const Real_t twelfth = (Real_t)(1.0) / (Real_t)(12.0) ; \
\
(dvdx) = \
((y1) + (y2)) * ((z0) + (z1)) - ((y0) + (y1)) * ((z1) + (z2)) + \
((y0) + (y4)) * ((z3) + (z4)) - ((y3) + (y4)) * ((z0) + (z4)) - \
((y2) + (y5)) * ((z3) + (z5)) + ((y3) + (y5)) * ((z2) + (z5)); \
(dvdy) = \
- ((x1) + (x2)) * ((z0) + (z1)) + ((x0) + (x1)) * ((z1) + (z2)) - \
((x0) + (x4)) * ((z3) + (z4)) + ((x3) + (x4)) * ((z0) + (z4)) + \
((x2) + (x5)) * ((z3) + (z5)) - ((x3) + (x5)) * ((z2) + (z5)); \
\
(dvdz) = \
- ((y1) + (y2)) * ((x0) + (x1)) + ((y0) + (y1)) * ((x1) + (x2)) - \
((y0) + (y4)) * ((x3) + (x4)) + ((y3) + (y4)) * ((x0) + (x4)) + \
((y2) + (y5)) * ((x3) + (x5)) - ((y3) + (y5)) * ((x2) + (x5)); \
\
(dvdx) *= twelfth; \
(dvdy) *= twelfth; \
(dvdz) *= twelfth; \
} while(0)
/******************************************/
/* Volume derivatives dV/dx, dV/dy, dV/dz at all 8 nodes of a hex
   element: one VoluDer invocation per node, each fed that node's six
   relevant neighbor coordinates.  x/y/z are val8 coordinate structs;
   dvdx/dvdy/dvdz are val8 outputs. */
#define CalcElemVolumeDerivative(dvdx, dvdy, dvdz, \
x, y, z) \
do { \
VoluDer(x.v1, x.v2, x.v3, x.v4, x.v5, x.v7, \
y.v1, y.v2, y.v3, y.v4, y.v5, y.v7, \
z.v1, z.v2, z.v3, z.v4, z.v5, z.v7, \
dvdx.v0, dvdy.v0, dvdz.v0); \
VoluDer(x.v0, x.v1, x.v2, x.v7, x.v4, x.v6, \
y.v0, y.v1, y.v2, y.v7, y.v4, y.v6, \
z.v0, z.v1, z.v2, z.v7, z.v4, z.v6, \
dvdx.v3, dvdy.v3, dvdz.v3); \
VoluDer(x.v3, x.v0, x.v1, x.v6, x.v7, x.v5, \
y.v3, y.v0, y.v1, y.v6, y.v7, y.v5, \
z.v3, z.v0, z.v1, z.v6, z.v7, z.v5, \
dvdx.v2, dvdy.v2, dvdz.v2); \
VoluDer(x.v2, x.v3, x.v0, x.v5, x.v6, x.v4, \
y.v2, y.v3, y.v0, y.v5, y.v6, y.v4, \
z.v2, z.v3, z.v0, z.v5, z.v6, z.v4, \
dvdx.v1, dvdy.v1, dvdz.v1); \
VoluDer(x.v7, x.v6, x.v5, x.v0, x.v3, x.v1, \
y.v7, y.v6, y.v5, y.v0, y.v3, y.v1, \
z.v7, z.v6, z.v5, z.v0, z.v3, z.v1, \
dvdx.v4, dvdy.v4, dvdz.v4); \
VoluDer(x.v4, x.v7, x.v6, x.v1, x.v0, x.v2, \
y.v4, y.v7, y.v6, y.v1, y.v0, y.v2, \
z.v4, z.v7, z.v6, z.v1, z.v0, z.v2, \
dvdx.v5, dvdy.v5, dvdz.v5); \
VoluDer(x.v5, x.v4, x.v7, x.v2, x.v1, x.v3, \
y.v5, y.v4, y.v7, y.v2, y.v1, y.v3, \
z.v5, z.v4, z.v7, z.v2, z.v1, z.v3, \
dvdx.v6, dvdy.v6, dvdz.v6); \
VoluDer(x.v6, x.v5, x.v4, x.v3, x.v2, x.v0, \
y.v6, y.v5, y.v4, y.v3, y.v2, y.v0, \
z.v6, z.v5, z.v4, z.v3, z.v2, z.v0, \
dvdx.v7, dvdy.v7, dvdz.v7); \
} while(0)
/******************************************/
//static inline
#define CalcElemFBHourglassForce(xd, yd, zd, \
hourgam, coefficient, \
hgfx, hgfy, hgfz) \
do { \
val8 hxx; \
hxx.v0 = hourgam.v0_0 * xd.v0 + hourgam.v1_0 * xd.v1 + \
hourgam.v2_0 * xd.v2 + hourgam.v3_0 * xd.v3 + \
hourgam.v4_0 * xd.v4 + hourgam.v5_0 * xd.v5 + \
hourgam.v6_0 * xd.v6 + hourgam.v7_0 * xd.v7; \
hxx.v1 = hourgam.v0_1 * xd.v0 + hourgam.v1_1 * xd.v1 + \
hourgam.v2_1 * xd.v2 + hourgam.v3_1 * xd.v3 + \
hourgam.v4_1 * xd.v4 + hourgam.v5_1 * xd.v5 + \
hourgam.v6_1 * xd.v6 + hourgam.v7_1 * xd.v7; \
hxx.v2 = hourgam.v0_2 * xd.v0 + hourgam.v1_2 * xd.v1 + \
hourgam.v2_2 * xd.v2 + hourgam.v3_2 * xd.v3 + \
hourgam.v4_2 * xd.v4 + hourgam.v5_2 * xd.v5 + \
hourgam.v6_2 * xd.v6 + hourgam.v7_2 * xd.v7; \
hxx.v3 = hourgam.v0_3 * xd.v0 + hourgam.v1_3 * xd.v1 + \
hourgam.v2_3 * xd.v2 + hourgam.v3_3 * xd.v3 + \
hourgam.v4_3 * xd.v4 + hourgam.v5_3 * xd.v5 + \
hourgam.v6_3 * xd.v6 + hourgam.v7_3 * xd.v7; \
\
hgfx.v0 = coefficient * \
(hourgam.v0_0 * hxx.v0 + hourgam.v0_1 * hxx.v1 + \
hourgam.v0_2 * hxx.v2 + hourgam.v0_3 * hxx.v3); \
hgfx.v1 = coefficient * \
(hourgam.v1_0 * hxx.v0 + hourgam.v1_1 * hxx.v1 + \
hourgam.v1_2 * hxx.v2 + hourgam.v1_3 * hxx.v3); \
hgfx.v2 = coefficient * \
(hourgam.v2_0 * hxx.v0 + hourgam.v2_1 * hxx.v1 + \
hourgam.v2_2 * hxx.v2 + hourgam.v2_3 * hxx.v3); \
hgfx.v3 = coefficient * \
(hourgam.v3_0 * hxx.v0 + hourgam.v3_1 * hxx.v1 + \
hourgam.v3_2 * hxx.v2 + hourgam.v3_3 * hxx.v3); \
hgfx.v4 = coefficient * \
(hourgam.v4_0 * hxx.v0 + hourgam.v4_1 * hxx.v1 + \
hourgam.v4_2 * hxx.v2 + hourgam.v4_3 * hxx.v3); \
hgfx.v5 = coefficient * \
(hourgam.v5_0 * hxx.v0 + hourgam.v5_1 * hxx.v1 + \
hourgam.v5_2 * hxx.v2 + hourgam.v5_3 * hxx.v3); \
hgfx.v6 = coefficient * \
(hourgam.v6_0 * hxx.v0 + hourgam.v6_1 * hxx.v1 + \
hourgam.v6_2 * hxx.v2 + hourgam.v6_3 * hxx.v3); \
hgfx.v7 = coefficient * \
(hourgam.v7_0 * hxx.v0 + hourgam.v7_1 * hxx.v1 + \
hourgam.v7_2 * hxx.v2 + hourgam.v7_3 * hxx.v3); \
\
hxx.v0 = hourgam.v0_0 * yd.v0 + hourgam.v1_0 * yd.v1 + \
hourgam.v2_0 * yd.v2 + hourgam.v3_0 * yd.v3 + \
hourgam.v4_0 * yd.v4 + hourgam.v5_0 * yd.v5 + \
hourgam.v6_0 * yd.v6 + hourgam.v7_0 * yd.v7; \
hxx.v1 = hourgam.v0_1 * yd.v0 + hourgam.v1_1 * yd.v1 + \
hourgam.v2_1 * yd.v2 + hourgam.v3_1 * yd.v3 + \
hourgam.v4_1 * yd.v4 + hourgam.v5_1 * yd.v5 + \
hourgam.v6_1 * yd.v6 + hourgam.v7_1 * yd.v7; \
hxx.v2 = hourgam.v0_2 * yd.v0 + hourgam.v1_2 * yd.v1 + \
hourgam.v2_2 * yd.v2 + hourgam.v3_2 * yd.v3 + \
hourgam.v4_2 * yd.v4 + hourgam.v5_2 * yd.v5 + \
hourgam.v6_2 * yd.v6 + hourgam.v7_2 * yd.v7; \
hxx.v3 = hourgam.v0_3 * yd.v0 + hourgam.v1_3 * yd.v1 + \
hourgam.v2_3 * yd.v2 + hourgam.v3_3 * yd.v3 + \
hourgam.v4_3 * yd.v4 + hourgam.v5_3 * yd.v5 + \
hourgam.v6_3 * yd.v6 + hourgam.v7_3 * yd.v7; \
\
hgfy.v0 = coefficient * \
(hourgam.v0_0 * hxx.v0 + hourgam.v0_1 * hxx.v1 + \
hourgam.v0_2 * hxx.v2 + hourgam.v0_3 * hxx.v3); \
hgfy.v1 = coefficient * \
(hourgam.v1_0 * hxx.v0 + hourgam.v1_1 * hxx.v1 + \
hourgam.v1_2 * hxx.v2 + hourgam.v1_3 * hxx.v3); \
hgfy.v2 = coefficient * \
(hourgam.v2_0 * hxx.v0 + hourgam.v2_1 * hxx.v1 + \
hourgam.v2_2 * hxx.v2 + hourgam.v2_3 * hxx.v3); \
hgfy.v3 = coefficient * \
(hourgam.v3_0 * hxx.v0 + hourgam.v3_1 * hxx.v1 + \
hourgam.v3_2 * hxx.v2 + hourgam.v3_3 * hxx.v3); \
hgfy.v4 = coefficient * \
(hourgam.v4_0 * hxx.v0 + hourgam.v4_1 * hxx.v1 + \
hourgam.v4_2 * hxx.v2 + hourgam.v4_3 * hxx.v3); \
hgfy.v5 = coefficient * \
(hourgam.v5_0 * hxx.v0 + hourgam.v5_1 * hxx.v1 + \
hourgam.v5_2 * hxx.v2 + hourgam.v5_3 * hxx.v3); \
hgfy.v6 = coefficient * \
(hourgam.v6_0 * hxx.v0 + hourgam.v6_1 * hxx.v1 + \
hourgam.v6_2 * hxx.v2 + hourgam.v6_3 * hxx.v3); \
hgfy.v7 = coefficient * \
(hourgam.v7_0 * hxx.v0 + hourgam.v7_1 * hxx.v1 + \
hourgam.v7_2 * hxx.v2 + hourgam.v7_3 * hxx.v3); \
\
hxx.v0 = hourgam.v0_0 * zd.v0 + hourgam.v1_0 * zd.v1 + \
hourgam.v2_0 * zd.v2 + hourgam.v3_0 * zd.v3 + \
hourgam.v4_0 * zd.v4 + hourgam.v5_0 * zd.v5 + \
hourgam.v6_0 * zd.v6 + hourgam.v7_0 * zd.v7; \
hxx.v1 = hourgam.v0_1 * zd.v0 + hourgam.v1_1 * zd.v1 + \
hourgam.v2_1 * zd.v2 + hourgam.v3_1 * zd.v3 + \
hourgam.v4_1 * zd.v4 + hourgam.v5_1 * zd.v5 + \
hourgam.v6_1 * zd.v6 + hourgam.v7_1 * zd.v7; \
hxx.v2 = hourgam.v0_2 * zd.v0 + hourgam.v1_2 * zd.v1 + \
hourgam.v2_2 * zd.v2 + hourgam.v3_2 * zd.v3 + \
hourgam.v4_2 * zd.v4 + hourgam.v5_2 * zd.v5 + \
hourgam.v6_2 * zd.v6 + hourgam.v7_2 * zd.v7; \
hxx.v3 = hourgam.v0_3 * zd.v0 + hourgam.v1_3 * zd.v1 + \
hourgam.v2_3 * zd.v2 + hourgam.v3_3 * zd.v3 + \
hourgam.v4_3 * zd.v4 + hourgam.v5_3 * zd.v5 + \
hourgam.v6_3 * zd.v6 + hourgam.v7_3 * zd.v7; \
\
hgfz.v0 = coefficient * \
(hourgam.v0_0 * hxx.v0 + hourgam.v0_1 * hxx.v1 + \
hourgam.v0_2 * hxx.v2 + hourgam.v0_3 * hxx.v3); \
hgfz.v1 = coefficient * \
(hourgam.v1_0 * hxx.v0 + hourgam.v1_1 * hxx.v1 + \
hourgam.v1_2 * hxx.v2 + hourgam.v1_3 * hxx.v3); \
hgfz.v2 = coefficient * \
(hourgam.v2_0 * hxx.v0 + hourgam.v2_1 * hxx.v1 + \
hourgam.v2_2 * hxx.v2 + hourgam.v2_3 * hxx.v3); \
hgfz.v3 = coefficient * \
(hourgam.v3_0 * hxx.v0 + hourgam.v3_1 * hxx.v1 + \
hourgam.v3_2 * hxx.v2 + hourgam.v3_3 * hxx.v3); \
hgfz.v4 = coefficient * \
(hourgam.v4_0 * hxx.v0 + hourgam.v4_1 * hxx.v1 + \
hourgam.v4_2 * hxx.v2 + hourgam.v4_3 * hxx.v3); \
hgfz.v5 = coefficient * \
(hourgam.v5_0 * hxx.v0 + hourgam.v5_1 * hxx.v1 + \
hourgam.v5_2 * hxx.v2 + hourgam.v5_3 * hxx.v3); \
hgfz.v6 = coefficient * \
(hourgam.v6_0 * hxx.v0 + hourgam.v6_1 * hxx.v1 + \
hourgam.v6_2 * hxx.v2 + hourgam.v6_3 * hxx.v3); \
hgfz.v7 = coefficient * \
(hourgam.v7_0 * hxx.v0 + hourgam.v7_1 * hxx.v1 + \
hourgam.v7_2 * hxx.v2 + hourgam.v7_3 * hxx.v3); \
} while(0)
/******************************************/
/* Build the 8x4 hourgam matrix for the current element:
 *   hourgam.vK_I = gamma[I][K] - volinv * (dvdx[i3+K]*hourmodx
 *                                        + dvdy[i3+K]*hourmody
 *                                        + dvdz[i3+K]*hourmodz)
 * where hourmod{x,y,z} is the projection of the element's 8 corner
 * coordinates (x8n/y8n/z8n, flat slot i3 = 8*elem) onto hourglass base
 * vector gamma[I].  This is the hand-unrolled I = 0..3 loop.
 * The macro takes no arguments: it relies on i3, volinv, x8n, y8n, z8n,
 * dvdx, dvdy, dvdz, gamma and hourgam being in scope at the expansion
 * site (see CalcFBHourglassForceForElems). */
#define FillHourGam \
do { \
/* i = 0 */ \
Real_t hourmodx = \
x8n[i3] * gamma[0][0] + x8n[i3+1] * gamma[0][1] + \
x8n[i3+2] * gamma[0][2] + x8n[i3+3] * gamma[0][3] + \
x8n[i3+4] * gamma[0][4] + x8n[i3+5] * gamma[0][5] + \
x8n[i3+6] * gamma[0][6] + x8n[i3+7] * gamma[0][7]; \
 \
Real_t hourmody = \
y8n[i3] * gamma[0][0] + y8n[i3+1] * gamma[0][1] + \
y8n[i3+2] * gamma[0][2] + y8n[i3+3] * gamma[0][3] + \
y8n[i3+4] * gamma[0][4] + y8n[i3+5] * gamma[0][5] + \
y8n[i3+6] * gamma[0][6] + y8n[i3+7] * gamma[0][7]; \
 \
Real_t hourmodz = \
z8n[i3] * gamma[0][0] + z8n[i3+1] * gamma[0][1] + \
z8n[i3+2] * gamma[0][2] + z8n[i3+3] * gamma[0][3] + \
z8n[i3+4] * gamma[0][4] + z8n[i3+5] * gamma[0][5] + \
z8n[i3+6] * gamma[0][6] + z8n[i3+7] * gamma[0][7]; \
 \
hourgam.v0_0 = gamma[0][0] - volinv*(dvdx[i3  ] * hourmodx + \
dvdy[i3  ] * hourmody + \
dvdz[i3  ] * hourmodz ); \
 \
hourgam.v1_0 = gamma[0][1] - volinv*(dvdx[i3+1] * hourmodx + \
dvdy[i3+1] * hourmody + \
dvdz[i3+1] * hourmodz ); \
 \
hourgam.v2_0 = gamma[0][2] - volinv*(dvdx[i3+2] * hourmodx + \
dvdy[i3+2] * hourmody + \
dvdz[i3+2] * hourmodz ); \
 \
hourgam.v3_0 = gamma[0][3] - volinv*(dvdx[i3+3] * hourmodx + \
dvdy[i3+3] * hourmody + \
dvdz[i3+3] * hourmodz ); \
 \
hourgam.v4_0 = gamma[0][4] - volinv*(dvdx[i3+4] * hourmodx + \
dvdy[i3+4] * hourmody + \
dvdz[i3+4] * hourmodz ); \
 \
hourgam.v5_0 = gamma[0][5] - volinv*(dvdx[i3+5] * hourmodx + \
dvdy[i3+5] * hourmody + \
dvdz[i3+5] * hourmodz ); \
 \
hourgam.v6_0 = gamma[0][6] - volinv*(dvdx[i3+6] * hourmodx + \
dvdy[i3+6] * hourmody + \
dvdz[i3+6] * hourmodz ); \
 \
hourgam.v7_0 = gamma[0][7] - volinv*(dvdx[i3+7] * hourmodx + \
dvdy[i3+7] * hourmody + \
dvdz[i3+7] * hourmodz ); \
/* i = 1 */ \
hourmodx = \
x8n[i3] * gamma[1][0] + x8n[i3+1] * gamma[1][1] + \
x8n[i3+2] * gamma[1][2] + x8n[i3+3] * gamma[1][3] + \
x8n[i3+4] * gamma[1][4] + x8n[i3+5] * gamma[1][5] + \
x8n[i3+6] * gamma[1][6] + x8n[i3+7] * gamma[1][7]; \
 \
hourmody = \
y8n[i3] * gamma[1][0] + y8n[i3+1] * gamma[1][1] + \
y8n[i3+2] * gamma[1][2] + y8n[i3+3] * gamma[1][3] + \
y8n[i3+4] * gamma[1][4] + y8n[i3+5] * gamma[1][5] + \
y8n[i3+6] * gamma[1][6] + y8n[i3+7] * gamma[1][7]; \
 \
hourmodz = \
z8n[i3] * gamma[1][0] + z8n[i3+1] * gamma[1][1] + \
z8n[i3+2] * gamma[1][2] + z8n[i3+3] * gamma[1][3] + \
z8n[i3+4] * gamma[1][4] + z8n[i3+5] * gamma[1][5] + \
z8n[i3+6] * gamma[1][6] + z8n[i3+7] * gamma[1][7]; \
 \
hourgam.v0_1 = gamma[1][0] - volinv*(dvdx[i3  ] * hourmodx + \
dvdy[i3  ] * hourmody + \
dvdz[i3  ] * hourmodz ); \
 \
hourgam.v1_1 = gamma[1][1] - volinv*(dvdx[i3+1] * hourmodx + \
dvdy[i3+1] * hourmody + \
dvdz[i3+1] * hourmodz ); \
 \
hourgam.v2_1 = gamma[1][2] - volinv*(dvdx[i3+2] * hourmodx + \
dvdy[i3+2] * hourmody + \
dvdz[i3+2] * hourmodz ); \
 \
hourgam.v3_1 = gamma[1][3] - volinv*(dvdx[i3+3] * hourmodx + \
dvdy[i3+3] * hourmody + \
dvdz[i3+3] * hourmodz ); \
 \
hourgam.v4_1 = gamma[1][4] - volinv*(dvdx[i3+4] * hourmodx + \
dvdy[i3+4] * hourmody + \
dvdz[i3+4] * hourmodz ); \
 \
hourgam.v5_1 = gamma[1][5] - volinv*(dvdx[i3+5] * hourmodx + \
dvdy[i3+5] * hourmody + \
dvdz[i3+5] * hourmodz ); \
 \
hourgam.v6_1 = gamma[1][6] - volinv*(dvdx[i3+6] * hourmodx + \
dvdy[i3+6] * hourmody + \
dvdz[i3+6] * hourmodz ); \
 \
hourgam.v7_1 = gamma[1][7] - volinv*(dvdx[i3+7] * hourmodx + \
dvdy[i3+7] * hourmody + \
dvdz[i3+7] * hourmodz ); \
/* i = 2 */ \
hourmodx = \
x8n[i3] * gamma[2][0] + x8n[i3+1] * gamma[2][1] + \
x8n[i3+2] * gamma[2][2] + x8n[i3+3] * gamma[2][3] + \
x8n[i3+4] * gamma[2][4] + x8n[i3+5] * gamma[2][5] + \
x8n[i3+6] * gamma[2][6] + x8n[i3+7] * gamma[2][7]; \
 \
hourmody = \
y8n[i3] * gamma[2][0] + y8n[i3+1] * gamma[2][1] + \
y8n[i3+2] * gamma[2][2] + y8n[i3+3] * gamma[2][3] + \
y8n[i3+4] * gamma[2][4] + y8n[i3+5] * gamma[2][5] + \
y8n[i3+6] * gamma[2][6] + y8n[i3+7] * gamma[2][7]; \
 \
hourmodz = \
z8n[i3] * gamma[2][0] + z8n[i3+1] * gamma[2][1] + \
z8n[i3+2] * gamma[2][2] + z8n[i3+3] * gamma[2][3] + \
z8n[i3+4] * gamma[2][4] + z8n[i3+5] * gamma[2][5] + \
z8n[i3+6] * gamma[2][6] + z8n[i3+7] * gamma[2][7]; \
 \
hourgam.v0_2 = gamma[2][0] - volinv*(dvdx[i3  ] * hourmodx + \
dvdy[i3  ] * hourmody + \
dvdz[i3  ] * hourmodz ); \
 \
hourgam.v1_2 = gamma[2][1] - volinv*(dvdx[i3+1] * hourmodx + \
dvdy[i3+1] * hourmody + \
dvdz[i3+1] * hourmodz ); \
 \
hourgam.v2_2 = gamma[2][2] - volinv*(dvdx[i3+2] * hourmodx + \
dvdy[i3+2] * hourmody + \
dvdz[i3+2] * hourmodz ); \
 \
hourgam.v3_2 = gamma[2][3] - volinv*(dvdx[i3+3] * hourmodx + \
dvdy[i3+3] * hourmody + \
dvdz[i3+3] * hourmodz ); \
 \
hourgam.v4_2 = gamma[2][4] - volinv*(dvdx[i3+4] * hourmodx + \
dvdy[i3+4] * hourmody + \
dvdz[i3+4] * hourmodz ); \
 \
hourgam.v5_2 = gamma[2][5] - volinv*(dvdx[i3+5] * hourmodx + \
dvdy[i3+5] * hourmody + \
dvdz[i3+5] * hourmodz ); \
 \
hourgam.v6_2 = gamma[2][6] - volinv*(dvdx[i3+6] * hourmodx + \
dvdy[i3+6] * hourmody + \
dvdz[i3+6] * hourmodz ); \
 \
hourgam.v7_2 = gamma[2][7] - volinv*(dvdx[i3+7] * hourmodx + \
dvdy[i3+7] * hourmody + \
dvdz[i3+7] * hourmodz ); \
/* i = 3 */ \
hourmodx = \
x8n[i3] * gamma[3][0] + x8n[i3+1] * gamma[3][1] + \
x8n[i3+2] * gamma[3][2] + x8n[i3+3] * gamma[3][3] + \
x8n[i3+4] * gamma[3][4] + x8n[i3+5] * gamma[3][5] + \
x8n[i3+6] * gamma[3][6] + x8n[i3+7] * gamma[3][7]; \
 \
hourmody = \
y8n[i3] * gamma[3][0] + y8n[i3+1] * gamma[3][1] + \
y8n[i3+2] * gamma[3][2] + y8n[i3+3] * gamma[3][3] + \
y8n[i3+4] * gamma[3][4] + y8n[i3+5] * gamma[3][5] + \
y8n[i3+6] * gamma[3][6] + y8n[i3+7] * gamma[3][7]; \
 \
hourmodz = \
z8n[i3] * gamma[3][0] + z8n[i3+1] * gamma[3][1] + \
z8n[i3+2] * gamma[3][2] + z8n[i3+3] * gamma[3][3] + \
z8n[i3+4] * gamma[3][4] + z8n[i3+5] * gamma[3][5] + \
z8n[i3+6] * gamma[3][6] + z8n[i3+7] * gamma[3][7]; \
 \
hourgam.v0_3 = gamma[3][0] - volinv*(dvdx[i3  ] * hourmodx + \
dvdy[i3  ] * hourmody + \
dvdz[i3  ] * hourmodz ); \
 \
hourgam.v1_3 = gamma[3][1] - volinv*(dvdx[i3+1] * hourmodx + \
dvdy[i3+1] * hourmody + \
dvdz[i3+1] * hourmodz ); \
 \
hourgam.v2_3 = gamma[3][2] - volinv*(dvdx[i3+2] * hourmodx + \
dvdy[i3+2] * hourmody + \
dvdz[i3+2] * hourmodz ); \
 \
hourgam.v3_3 = gamma[3][3] - volinv*(dvdx[i3+3] * hourmodx + \
dvdy[i3+3] * hourmody + \
dvdz[i3+3] * hourmodz ); \
 \
hourgam.v4_3 = gamma[3][4] - volinv*(dvdx[i3+4] * hourmodx + \
dvdy[i3+4] * hourmody + \
dvdz[i3+4] * hourmodz ); \
 \
hourgam.v5_3 = gamma[3][5] - volinv*(dvdx[i3+5] * hourmodx + \
dvdy[i3+5] * hourmody + \
dvdz[i3+5] * hourmodz ); \
 \
hourgam.v6_3 = gamma[3][6] - volinv*(dvdx[i3+6] * hourmodx + \
dvdy[i3+6] * hourmody + \
dvdz[i3+6] * hourmodz ); \
 \
hourgam.v7_3 = gamma[3][7] - volinv*(dvdx[i3+7] * hourmodx + \
dvdy[i3+7] * hourmody + \
dvdz[i3+7] * hourmodz ); \
} while(0)
/* Compute the Flanagan-Belytschko anti-hourglass force for every element
 * and accumulate the per-corner contributions into the nodal force
 * arrays fx/fy/fz.  Phase 1 writes per-element-corner forces into
 * f{x,y,z}_elem (no races); phase 2 gathers them per node through the
 * nodeElemStart/nodeElemCount/nodeElemCornerList indirection.
 * hourg is the hourglass control coefficient; determ holds the element
 * volumes; x8n/y8n/z8n and dvdx/dvdy/dvdz were filled by
 * CalcHourglassControlForElems. */
static inline
void CalcFBHourglassForceForElems(
Index_t *nodelist,
Index_t *nodeElemCount,
Index_t *nodeElemStart,
Index_t *nodeElemCornerList,
Real_t *determ,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *x8n, Real_t *y8n, Real_t *z8n,
Real_t *dvdx, Real_t *dvdy, Real_t *dvdz,
Real_t hourg, Index_t numElem, Index_t numNode)
{
#if !defined(_OPENACC) && defined(_OPENMP)
/* NOTE(review): numthreads is never referenced in this variant of the
   function -- it appears to be a leftover from the thread-partitioned
   CPU version. */
Index_t numthreads = omp_get_max_threads();
#else
Index_t numthreads = 1;
#endif
/*************************************************
*
* FUNCTION: Calculates the Flanagan-Belytschko anti-hourglass
* force.
*
*************************************************/
Index_t numElem8 = numElem * 8 ;
/* local aliases of the mesh globals so the pragmas can name them */
Real_t *ss = m_ss;
Real_t *elemMass = m_elemMass;
Real_t *xd = m_xd;
Real_t *yd = m_yd;
Real_t *zd = m_zd;
Real_t *fx_elem = m_fx_elem;
Real_t *fy_elem = m_fy_elem;
Real_t *fz_elem = m_fz_elem;
#ifdef USE_UNIFIEDMEM
/* unified-memory build: gamma lives in host/device-shared storage
   (freed via acc_delete_unified at the end of this function) */
Real_t (*gamma)[8] = (Real_t (*)[8])acc_create_unified(NULL, sizeof(Real_t)*4*8);
#else
Real_t gamma[4][8];
#endif
/* the four +-1 hourglass base-vector sign patterns, one row per mode */
gamma[0][0] = (Real_t)( 1.);
gamma[0][1] = (Real_t)( 1.);
gamma[0][2] = (Real_t)(-1.);
gamma[0][3] = (Real_t)(-1.);
gamma[0][4] = (Real_t)(-1.);
gamma[0][5] = (Real_t)(-1.);
gamma[0][6] = (Real_t)( 1.);
gamma[0][7] = (Real_t)( 1.);
gamma[1][0] = (Real_t)( 1.);
gamma[1][1] = (Real_t)(-1.);
gamma[1][2] = (Real_t)(-1.);
gamma[1][3] = (Real_t)( 1.);
gamma[1][4] = (Real_t)(-1.);
gamma[1][5] = (Real_t)( 1.);
gamma[1][6] = (Real_t)( 1.);
gamma[1][7] = (Real_t)(-1.);
gamma[2][0] = (Real_t)( 1.);
gamma[2][1] = (Real_t)(-1.);
gamma[2][2] = (Real_t)( 1.);
gamma[2][3] = (Real_t)(-1.);
gamma[2][4] = (Real_t)( 1.);
gamma[2][5] = (Real_t)(-1.);
gamma[2][6] = (Real_t)( 1.);
gamma[2][7] = (Real_t)(-1.);
gamma[3][0] = (Real_t)(-1.);
gamma[3][1] = (Real_t)( 1.);
gamma[3][2] = (Real_t)(-1.);
gamma[3][3] = (Real_t)( 1.);
gamma[3][4] = (Real_t)( 1.);
gamma[3][5] = (Real_t)(-1.);
gamma[3][6] = (Real_t)( 1.);
gamma[3][7] = (Real_t)(-1.);
Index_t i2;
/*************************************************/
/* compute the hourglass modes */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc kernels copyin(gamma[4][8]) \
present(fx_elem[numElem8], \
fy_elem[numElem8], \
fz_elem[numElem8], \
xd[numNode], \
yd[numNode], \
zd[numNode], \
dvdx[numElem8], \
dvdy[numElem8], \
dvdz[numElem8], \
x8n[numElem8], \
y8n[numElem8], \
z8n[numElem8], \
nodelist[numElem8],\
determ[numElem], \
ss[numElem], \
elemMass[numElem]) async(0)
#else
#pragma acc kernels copyin(gamma[4][8]) \
present(fx_elem[numElem8], \
fy_elem[numElem8], \
fz_elem[numElem8], \
xd[numNode], \
yd[numNode], \
zd[numNode], \
dvdx[numElem8], \
dvdy[numElem8], \
dvdz[numElem8], \
x8n[numElem8], \
y8n[numElem8], \
z8n[numElem8], \
nodelist[numElem8],\
determ[numElem], \
ss[numElem], \
elemMass[numElem])
#endif
#pragma acc cache(gamma)
#pragma acc loop independent
#else
#pragma omp parallel for firstprivate(numElem, hourg)
#endif
for(i2=0;i2<numElem;++i2){
val8 hgfx, hgfy, hgfz;
Real_t coefficient;
//Real_t hourgam[8][4];
hourmat hourgam;
val8 xd1, yd1, zd1;
const Index_t *elemToNode = &nodelist[i2*8];
Index_t i3=8*i2;
Real_t volinv=(Real_t)(1.0)/determ[i2];
Real_t ss1, mass1, volume13 ;
/* Large macro of unrolled loop */
FillHourGam;
/* compute forces */
/* store forces into h arrays (force arrays) */
ss1 = ss[i2];
mass1 = elemMass[i2];
/* volume13 = cube root of the element volume */
volume13 = pow(determ[i2], (1.0 / 3.0));
/* gather the 8 nodal velocities of this element */
Index_t n0si2 = elemToNode[0];
Index_t n1si2 = elemToNode[1];
Index_t n2si2 = elemToNode[2];
Index_t n3si2 = elemToNode[3];
Index_t n4si2 = elemToNode[4];
Index_t n5si2 = elemToNode[5];
Index_t n6si2 = elemToNode[6];
Index_t n7si2 = elemToNode[7];
xd1.v0 = xd[n0si2];
xd1.v1 = xd[n1si2];
xd1.v2 = xd[n2si2];
xd1.v3 = xd[n3si2];
xd1.v4 = xd[n4si2];
xd1.v5 = xd[n5si2];
xd1.v6 = xd[n6si2];
xd1.v7 = xd[n7si2];
yd1.v0 = yd[n0si2];
yd1.v1 = yd[n1si2];
yd1.v2 = yd[n2si2];
yd1.v3 = yd[n3si2];
yd1.v4 = yd[n4si2];
yd1.v5 = yd[n5si2];
yd1.v6 = yd[n6si2];
yd1.v7 = yd[n7si2];
zd1.v0 = zd[n0si2];
zd1.v1 = zd[n1si2];
zd1.v2 = zd[n2si2];
zd1.v3 = zd[n3si2];
zd1.v4 = zd[n4si2];
zd1.v5 = zd[n5si2];
zd1.v6 = zd[n6si2];
zd1.v7 = zd[n7si2];
coefficient = - hourg * (Real_t)(0.01) * ss1 * mass1 / volume13;
CalcElemFBHourglassForce(xd1,yd1,zd1, hourgam, coefficient,
hgfx, hgfy, hgfz);
// With the threaded version, we write into local arrays per elem
// so we don't have to worry about race conditions
fx_elem[i3 + 0] = hgfx.v0;
fx_elem[i3 + 1] = hgfx.v1;
fx_elem[i3 + 2] = hgfx.v2;
fx_elem[i3 + 3] = hgfx.v3;
fx_elem[i3 + 4] = hgfx.v4;
fx_elem[i3 + 5] = hgfx.v5;
fx_elem[i3 + 6] = hgfx.v6;
fx_elem[i3 + 7] = hgfx.v7;
fy_elem[i3 + 0] = hgfy.v0;
fy_elem[i3 + 1] = hgfy.v1;
fy_elem[i3 + 2] = hgfy.v2;
fy_elem[i3 + 3] = hgfy.v3;
fy_elem[i3 + 4] = hgfy.v4;
fy_elem[i3 + 5] = hgfy.v5;
fy_elem[i3 + 6] = hgfy.v6;
fy_elem[i3 + 7] = hgfy.v7;
fz_elem[i3 + 0] = hgfz.v0;
fz_elem[i3 + 1] = hgfz.v1;
fz_elem[i3 + 2] = hgfz.v2;
fz_elem[i3 + 3] = hgfz.v3;
fz_elem[i3 + 4] = hgfz.v4;
fz_elem[i3 + 5] = hgfz.v5;
fz_elem[i3 + 6] = hgfz.v6;
fz_elem[i3 + 7] = hgfz.v7;
} // end accelerated for
/* volatile because otherwise it will be optimized out of the pragma and
break things. */
volatile Index_t nCorner = nodeElemStart[numNode-1]
+ nodeElemCount[numNode-1];
Index_t gnode;
// Collect the data from the local arrays into the final force arrays
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc kernels loop independent vector(256) \
present(nodeElemCount[numNode], \
nodeElemStart[numNode], \
nodeElemCornerList[nCorner], \
fx_elem[numElem8], \
fy_elem[numElem8], \
fz_elem[numElem8], \
fx[numNode], \
fy[numNode], \
fz[numNode]) async(0)
#else
#pragma acc kernels loop independent vector(256) \
present(nodeElemCount[numNode], \
nodeElemStart[numNode], \
nodeElemCornerList[nCorner], \
fx_elem[numElem8], \
fy_elem[numElem8], \
fz_elem[numElem8], \
fx[numNode], \
fy[numNode], \
fz[numNode])
#endif
#else
#pragma omp parallel for firstprivate(numNode)
#endif
for( gnode=0 ; gnode<numNode ; ++gnode )
{
/* sum this node's corner contributions from every adjacent element */
Index_t count = nodeElemCount[gnode] ;
Index_t start = nodeElemStart[gnode] ;
Real_t fx_tmp = (Real_t)(0.0) ;
Real_t fy_tmp = (Real_t)(0.0) ;
Real_t fz_tmp = (Real_t)(0.0) ;
Index_t i;
for (i=0 ; i < count ; ++i) {
Index_t elem = nodeElemCornerList[start+i] ;
fx_tmp += fx_elem[elem] ;
fy_tmp += fy_elem[elem] ;
fz_tmp += fz_elem[elem] ;
}
fx[gnode] += fx_tmp ;
fy[gnode] += fy_tmp ;
fz[gnode] += fz_tmp ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
#ifdef USE_UNIFIEDMEM
acc_delete_unified(gamma, 0);
#endif
}
/******************************************/
/* Scatter one element's per-corner volume derivatives (pfx/pfy/pfz, val8)
 * and coordinates (x1/y1/z1, val8) into the flat temporary arrays used by
 * the FB hourglass control: flat slot 8*(i)+k receives corner k of
 * element i.
 * BUG FIX: the original definition ended with "} while(0) \" -- the stray
 * trailing backslash spliced the NEXT source line ("static inline") into
 * the macro body, corrupting every expansion of this macro and silently
 * stripping "static inline" from the following function definition.  The
 * backslash is removed here. */
#define LoadTmpStorageFBControl(dvdx, dvdy, dvdz, \
pfx, pfy, pfz, \
x8n, y8n, z8n, \
x1, y1, z1, \
i) \
do { \
Index_t jj; \
jj = 8*(i)+0; \
(dvdx)[jj] = (pfx).v0; \
(dvdy)[jj] = (pfy).v0; \
(dvdz)[jj] = (pfz).v0; \
(x8n)[jj] = (x1).v0; \
(y8n)[jj] = (y1).v0; \
(z8n)[jj] = (z1).v0; \
jj = 8*(i)+1; \
(dvdx)[jj] = (pfx).v1; \
(dvdy)[jj] = (pfy).v1; \
(dvdz)[jj] = (pfz).v1; \
(x8n)[jj] = (x1).v1; \
(y8n)[jj] = (y1).v1; \
(z8n)[jj] = (z1).v1; \
jj = 8*(i)+2; \
(dvdx)[jj] = (pfx).v2; \
(dvdy)[jj] = (pfy).v2; \
(dvdz)[jj] = (pfz).v2; \
(x8n)[jj] = (x1).v2; \
(y8n)[jj] = (y1).v2; \
(z8n)[jj] = (z1).v2; \
jj = 8*(i)+3; \
(dvdx)[jj] = (pfx).v3; \
(dvdy)[jj] = (pfy).v3; \
(dvdz)[jj] = (pfz).v3; \
(x8n)[jj] = (x1).v3; \
(y8n)[jj] = (y1).v3; \
(z8n)[jj] = (z1).v3; \
jj = 8*(i)+4; \
(dvdx)[jj] = (pfx).v4; \
(dvdy)[jj] = (pfy).v4; \
(dvdz)[jj] = (pfz).v4; \
(x8n)[jj] = (x1).v4; \
(y8n)[jj] = (y1).v4; \
(z8n)[jj] = (z1).v4; \
jj = 8*(i)+5; \
(dvdx)[jj] = (pfx).v5; \
(dvdy)[jj] = (pfy).v5; \
(dvdz)[jj] = (pfz).v5; \
(x8n)[jj] = (x1).v5; \
(y8n)[jj] = (y1).v5; \
(z8n)[jj] = (z1).v5; \
jj = 8*(i)+6; \
(dvdx)[jj] = (pfx).v6; \
(dvdy)[jj] = (pfy).v6; \
(dvdz)[jj] = (pfz).v6; \
(x8n)[jj] = (x1).v6; \
(y8n)[jj] = (y1).v6; \
(z8n)[jj] = (z1).v6; \
jj = 8*(i)+7; \
(dvdx)[jj] = (pfx).v7; \
(dvdy)[jj] = (pfy).v7; \
(dvdz)[jj] = (pfz).v7; \
(x8n)[jj] = (x1).v7; \
(y8n)[jj] = (y1).v7; \
(z8n)[jj] = (z1).v7; \
} while(0)
/* Per-element setup for hourglass control: for every element, gather its
 * 8 corner coordinates, compute the volume derivatives, stash both into
 * the flat scratch arrays (dvdx/dvdy/dvdz, x8n/y8n/z8n), and set
 * determ[i] = reference volume * relative volume.  Aborts the run on a
 * non-positive relative volume, then delegates the actual hourglass force
 * to CalcFBHourglassForceForElems when hgcoef > 0. */
static inline
void CalcHourglassControlForElems(
Real_t *x, Real_t *y, Real_t *z,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t determ[], Real_t hgcoef,
Index_t *nodelist,
Index_t *nodeElemCount,
Index_t *nodeElemStart,
Index_t *nodeElemCornerList)
{
Index_t numElem = m_numElem ;
/* volatile: keep the extents materialized for the pragma data clauses
   (same workaround as the nCorner note in CalcFBHourglassForceForElems) */
volatile Index_t numElem8 = numElem * 8 ;
volatile Index_t numNode = m_numNode;
/* scratch arrays (8 slots per element) consumed by the FB force kernel */
Real_t *dvdx = m_dvdx;
Real_t *dvdy = m_dvdy;
Real_t *dvdz = m_dvdz;
Real_t *x8n = m_x8n;
Real_t *y8n = m_y8n;
Real_t *z8n = m_z8n;
Real_t *volo = m_volo;
Real_t *v = m_v;
/* max-reduced flag: set to 1 by any element with non-positive volume */
int abort = 0;
/* start loop over elements */
Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(dvdx[numElem8], \
dvdy[numElem8], \
dvdz[numElem8], \
x8n[numElem8], \
y8n[numElem8], \
z8n[numElem8], \
x[numNode], \
y[numNode], \
z[numNode], \
volo[numElem], \
v[numElem], \
determ[numElem], \
nodelist[numElem8]) \
reduction(max: abort) async(0)
#else
#pragma acc parallel loop present(dvdx[numElem8], \
dvdy[numElem8], \
dvdz[numElem8], \
x8n[numElem8], \
y8n[numElem8], \
z8n[numElem8], \
x[numNode], \
y[numNode], \
z[numNode], \
volo[numElem], \
v[numElem], \
determ[numElem], \
nodelist[numElem8]) \
reduction(max: abort)
#endif
#else
#pragma omp parallel for firstprivate(numElem) reduction(max: abort)
#endif
for (i=0 ; i<numElem ; ++i){
val8 x1;
val8 y1;
val8 z1;
val8 pfx;
val8 pfy;
val8 pfz;
Index_t* elemToNode = &nodelist[i*8];
CollectDomainNodesToElemNodes(x, y, z,
elemToNode, x1, y1, z1);
CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1);
/* load into temporary storage for FB Hour Glass control */
LoadTmpStorageFBControl(dvdx, dvdy, dvdz,
pfx, pfy, pfz,
x8n, y8n, z8n,
x1, y1, z1,
i);
determ[i] = volo[i] * v[i];
/* Do a check for negative volumes */
if ( v[i] <= (Real_t)(0.0) ) {
abort = 1;
}
} // end for
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
if(abort) {
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
#endif
}
if ( hgcoef > (Real_t)(0.) ) {
CalcFBHourglassForceForElems(nodelist, nodeElemCount,
nodeElemStart, nodeElemCornerList,
determ, fx, fy, fz,
x8n, y8n, z8n, dvdx, dvdy, dvdz,
hgcoef, numElem, numNode );
}
return ;
}
/******************************************/
/* Driver for the element-based volume force: builds the stress terms,
 * integrates them into nodal forces, validates that every element volume
 * determinant is positive (aborting otherwise), and finally applies
 * hourglass control.  Accumulates into the nodal force arrays fx/fy/fz. */
static inline
void CalcVolumeForceForElems(Real_t *fx, Real_t *fy, Real_t *fz)
{
Index_t numElem = m_numElem ;
Index_t numNode = m_numNode;
if (numElem != 0) {
Real_t hgcoef = m_hgcoef ;
Real_t *sigxx = m_sigxx;
Real_t *sigyy = m_sigyy;
Real_t *sigzz = m_sigzz;
Real_t *determ = m_determ;
Real_t *p = m_p;
Real_t *q = m_q;
Real_t *x = m_x;
Real_t *y = m_y;
Real_t *z = m_z;
Index_t *nodelist = m_nodelist;
Index_t *nodeElemCount = m_nodeElemCount;
Index_t *nodeElemStart = m_nodeElemStart;
Index_t *nodeElemCornerList = m_nodeElemCornerList;
/* Sum contributions to total stress tensor */
InitStressTermsForElems(p, q, sigxx, sigyy, sigzz, numElem);
// call elemlib stress integration loop to produce nodal forces from
// material stresses.
IntegrateStressForElems( nodelist,
x, y, z,
fx, fy, fz,
m_fx_elem, m_fy_elem, m_fz_elem,
nodeElemCount,
nodeElemStart,
nodeElemCornerList,
sigxx, sigyy, sigzz, determ, numElem,
numNode);
/* max-reduced flag: any non-positive determinant aborts the run */
int abort = 0;
Index_t k;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(determ[numElem]) \
reduction(max: abort) async(0)
#else
#pragma acc parallel loop present(determ[numElem]) \
reduction(max: abort)
#endif
#else
#pragma omp parallel for reduction(max:abort) firstprivate(numElem)
#endif
for(k = 0; k < numElem; ++k) {
if(determ[k] <= (Real_t)(0.0)) {
abort = 1;
}
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
if(abort == 1) {
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
#endif
}
CalcHourglassControlForElems(x, y, z, fx, fy, fz, determ, hgcoef,
nodelist, nodeElemCount, nodeElemStart,
nodeElemCornerList);
}
}
/******************************************/
/* Zero the nodal force arrays, compute all volume forces, then (MPI
 * builds only) exchange and sum boundary-node forces with neighbor
 * domains.  The CommRecv is posted up front so receives overlap the
 * force computation; after CommSBN the summed host values are pushed
 * back to the device. */
static inline void CalcForceForNodes()
{
Index_t numNode = m_numNode ;
#if USE_MPI
CommRecv(MSG_COMM_SBN, 3,
m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
true, false) ;
#endif
Real_t *fx = m_fx;
Real_t *fy = m_fy;
Real_t *fz = m_fz;
Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(fx[numNode], \
fy[numNode], \
fz[numNode]) async(0)
#else
#pragma acc parallel loop present(fx[numNode], \
fy[numNode], \
fz[numNode])
#endif
#else
#pragma omp parallel for firstprivate(numNode)
#endif
for (i=0; i<numNode; ++i) {
fx[i] = (Real_t)(0.0);
fy[i] = (Real_t)(0.0);
fz[i] = (Real_t)(0.0);
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
/* Calcforce calls partial, force, hourq */
CalcVolumeForceForElems(fx, fy, fz) ;
#if USE_MPI
Real_t *fieldData[3] ;
#pragma acc data present(fx[numNode], \
fy[numNode], \
fz[numNode])
{
/* bring device forces to the host for the MPI boundary exchange */
#pragma acc update host(fx[numNode], \
fy[numNode], \
fz[numNode])
fieldData[0] = fx;
fieldData[1] = fy;
fieldData[2] = fz;
CommSend(MSG_COMM_SBN, 3, fieldData,
m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
true, false) ;
CommSBN(3, fieldData) ;
/* push the summed boundary forces back to the device; the non-unified
   path does it asynchronously (consumers wait before reading) */
#ifdef USE_UNIFIEDMEM
#pragma acc update device(fx[numNode], \
fy[numNode], \
fz[numNode])
#else
#pragma acc update device(fx[numNode], \
fy[numNode], \
fz[numNode]) \
async
#endif
} // end acc data
#endif
}
/******************************************/
/* Acceleration update (Newton's second law): xdd/ydd/zdd <- F/m for
 * every node, one independent update per node.  Offloaded with OpenACC
 * when enabled, otherwise parallelized with OpenMP. */
static inline
void CalcAccelerationForNodes(Real_t *xdd, Real_t *ydd, Real_t *zdd,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *nodalMass, Index_t numNode)
{
Index_t node;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(fx[numNode], \
fy[numNode], \
fz[numNode], \
xdd[numNode], \
ydd[numNode], \
zdd[numNode], \
nodalMass[numNode]) async(0)
#else
#pragma acc parallel loop present(fx[numNode], \
fy[numNode], \
fz[numNode], \
xdd[numNode], \
ydd[numNode], \
zdd[numNode], \
nodalMass[numNode])
#endif
#else
#pragma omp parallel for firstprivate(numNode)
#endif
for (node = 0; node < numNode; ++node) {
Real_t mass = nodalMass[node];
xdd[node] = fx[node] / mass;
ydd[node] = fy[node] / mass;
zdd[node] = fz[node] / mass;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
/* Zero the acceleration component normal to each symmetry plane:
 * symmX/symmY/symmZ list the node indices on the X/Y/Z boundary planes.
 * Empty planes are handled by setting the loop end to 0 instead of
 * branching, so the three loops can run without synchronization between
 * them (the device-side analogue of omp nowait). */
static inline
void ApplyAccelerationBoundaryConditionsForNodes(
Real_t *xdd, Real_t *ydd, Real_t *zdd)
{
volatile Index_t numNode = m_numNode;
volatile Index_t size = m_sizeX;
Index_t numNodeBC = (size+1)*(size+1) ;
Index_t *symmX = m_symmX;
Index_t *symmY = m_symmY;
Index_t *symmZ = m_symmZ;
/* replace conditional loops with altered end conditions. This allows to do
the equivalent of a nowait on the device too. */
Index_t endX = m_symmXempty ? 0 : numNodeBC;
Index_t endY = m_symmYempty ? 0 : numNodeBC;
Index_t endZ = m_symmZempty ? 0 : numNodeBC;
/* skip the (empty) parallel region entirely when no plane has nodes */
if (!endX && !endY && !endZ) return; /* OPENARC BUG */
Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel firstprivate(numNodeBC) \
present(xdd[numNode], \
ydd[numNode], \
zdd[numNode], \
symmX[numNodeBC], \
symmY[numNodeBC], \
symmZ[numNodeBC]) async(0)
#else
#pragma acc parallel firstprivate(numNodeBC) \
present(xdd[numNode], \
ydd[numNode], \
zdd[numNode], \
symmX[numNodeBC], \
symmY[numNodeBC], \
symmZ[numNodeBC])
#endif
#else
#pragma omp parallel firstprivate(numNodeBC)
#endif
{
#ifdef _OPENACC
#pragma acc loop
#else
#pragma omp for nowait
#endif
for(i=0 ; i<endX ; ++i) {
xdd[symmX[i]] = (Real_t)(0.0) ;
}
#ifdef _OPENACC
#pragma acc loop
#else
#pragma omp for nowait
#endif
for(i=0 ; i<endY ; ++i) {
ydd[symmY[i]] = (Real_t)(0.0) ;
}
#ifdef _OPENACC
#pragma acc loop
#else
#pragma omp for nowait
#endif
for(i=0 ; i<endZ ; ++i) {
zdd[symmZ[i]] = (Real_t)(0.0) ;
}
} // end parallel region
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
/* Velocity update: v <- v + a*dt per node and component, with a cutoff
 * that snaps velocities of magnitude below u_cut to exactly zero (keeps
 * tiny numerical noise from propagating). */
static inline
void CalcVelocityForNodes(Real_t *xd, Real_t *yd, Real_t *zd,
Real_t *xdd, Real_t *ydd, Real_t *zdd,
const Real_t dt, const Real_t u_cut,
Index_t numNode)
{
Index_t n;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(xd[numNode], \
yd[numNode], \
zd[numNode], \
xdd[numNode], \
ydd[numNode], \
zdd[numNode]) async(0)
#else
#pragma acc parallel loop present(xd[numNode], \
yd[numNode], \
zd[numNode], \
xdd[numNode], \
ydd[numNode], \
zdd[numNode])
#endif
#else
#pragma omp parallel for firstprivate(numNode)
#endif
for ( n = 0 ; n < numNode ; ++n )
{
Real_t vx = xd[n] + xdd[n] * dt ;
xd[n] = (fabs(vx) < u_cut) ? (Real_t)(0.0) : vx ;
Real_t vy = yd[n] + ydd[n] * dt ;
yd[n] = (fabs(vy) < u_cut) ? (Real_t)(0.0) : vy ;
Real_t vz = zd[n] + zdd[n] * dt ;
zd[n] = (fabs(vz) < u_cut) ? (Real_t)(0.0) : vz ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
/* Position integration: x += xd * dt for every node (likewise y, z).
 * x/y/z are updated in place from the nodal velocities xd/yd/zd.
 * BUG FIX: under _OPENACC with USE_ASYNC the accelerator pragma was
 * missing entirely (the "#ifdef USE_ASYNC" branch was empty), so the
 * loop ran serially on the host against stale host copies while every
 * sibling kernel ran async(0) on the device -- and the trailing
 * "#pragma acc wait(0)" shows an async kernel was intended.  The
 * async(0) variant of the pragma is restored to match the surrounding
 * functions. */
static inline
void CalcPositionForNodes(Real_t *x, Real_t *y, Real_t *z,
Real_t *xd, Real_t *yd, Real_t *zd,
const Real_t dt, Index_t numNode)
{
Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode]) async(0)
#else
#pragma acc parallel loop present(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode])
#endif
#else
#pragma omp parallel for firstprivate(numNode)
#endif
for ( i = 0 ; i < numNode ; ++i )
{
x[i] += xd[i] * dt ;
y[i] += yd[i] * dt ;
z[i] += zd[i] * dt ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
/* One Lagrangian nodal phase of the timestep: compute nodal forces,
 * derive accelerations, apply symmetry-plane boundary conditions, then
 * integrate velocities and positions.  With MPI and
 * SEDOV_SYNC_POS_VEL_EARLY, positions/velocities are exchanged with
 * neighbor domains here, overlapping the device-to-host updates with the
 * position integration where possible. */
static inline
void LagrangeNodal()
{
#ifdef SEDOV_SYNC_POS_VEL_EARLY
Real_t *fieldData[6] ;
#endif
const Real_t delt = m_deltatime ;
Real_t u_cut = m_u_cut ;
Index_t numNode = m_numNode;
Index_t numElem = m_numElem;
Real_t *fx = m_fx;
Real_t *fy = m_fy;
Real_t *fz = m_fz;
Real_t *x = m_x;
Real_t *y = m_y;
Real_t *z = m_z;
Real_t *xd = m_xd;
Real_t *yd = m_yd;
Real_t *zd = m_zd;
Real_t *xdd = m_xdd;
Real_t *ydd = m_ydd;
Real_t *zdd = m_zdd;
Real_t *nodalMass = m_nodalMass;
/* time of boundary condition evaluation is beginning of step for force and
 * acceleration boundary conditions. */
CalcForceForNodes();
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(MSG_SYNC_POS_VEL, 6,
m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
false, false) ;
#endif
#endif
// redundant data region to allow for early acc updates before communication
#pragma acc data present(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode])
{
#if USE_MPI
/* used for async update */
volatile int up = 1;
/* wait for async device update in CalcForceForNodes to complete */
#ifndef USE_UNIFIEDMEM
#pragma acc wait
#endif
#endif
CalcAccelerationForNodes(xdd, ydd, zdd,
fx, fy, fz,
nodalMass, numNode);
ApplyAccelerationBoundaryConditionsForNodes(xdd, ydd, zdd);
CalcVelocityForNodes( xd, yd, zd,
xdd, ydd, zdd,
delt, u_cut, m_numNode) ;
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
/* start to update velocities asynchronously before the MPI comm */
#ifdef USE_UNIFIEDMEM
#pragma acc update host(xd[numNode], \
yd[numNode], \
zd[numNode])
#else
#pragma acc update host(xd[numNode], \
yd[numNode], \
zd[numNode]) \
async(up)
#endif
#endif
#endif
CalcPositionForNodes( x, y, z,
xd, yd, zd,
delt, m_numNode );
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
/* positions follow on the same async queue, then wait for both */
#ifdef USE_UNIFIEDMEM
#pragma acc update host(x[numNode], \
y[numNode], \
z[numNode])
#else
#pragma acc update host(x[numNode], \
y[numNode], \
z[numNode]) \
async(up)
#endif
#ifndef USE_UNIFIEDMEM
#pragma acc wait(up)
#endif
fieldData[0] = x ;
fieldData[1] = y ;
fieldData[2] = z ;
fieldData[3] = xd ;
fieldData[4] = yd ;
fieldData[5] = zd ;
CommSend(MSG_SYNC_POS_VEL, 6, fieldData,
m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
false, false) ;
CommSyncPosVel() ;
/* update device after CommRecv */
#ifdef USE_UNIFIEDMEM
#pragma acc update device(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode])
#else
#pragma acc update device(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode]) \
async
#endif
#endif
#endif
} // end acc data
return;
}
/******************************************/
static inline
Real_t CalcElemVolumeStatic( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t x4, const Real_t x5,
const Real_t x6, const Real_t x7,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t y4, const Real_t y5,
const Real_t y6, const Real_t y7,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3,
const Real_t z4, const Real_t z5,
const Real_t z6, const Real_t z7 )
{
Real_t twelveth = (Real_t)(1.0)/(Real_t)(12.0);
Real_t dx61 = x6 - x1;
Real_t dy61 = y6 - y1;
Real_t dz61 = z6 - z1;
Real_t dx70 = x7 - x0;
Real_t dy70 = y7 - y0;
Real_t dz70 = z7 - z0;
Real_t dx63 = x6 - x3;
Real_t dy63 = y6 - y3;
Real_t dz63 = z6 - z3;
Real_t dx20 = x2 - x0;
Real_t dy20 = y2 - y0;
Real_t dz20 = z2 - z0;
Real_t dx50 = x5 - x0;
Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;
Real_t dx64 = x6 - x4;
Real_t dy64 = y6 - y4;
Real_t dz64 = z6 - z4;
Real_t dx31 = x3 - x1;
Real_t dy31 = y3 - y1;
Real_t dz31 = z3 - z1;
Real_t dx72 = x7 - x2;
Real_t dy72 = y7 - y2;
Real_t dz72 = z7 - z2;
Real_t dx43 = x4 - x3;
Real_t dy43 = y4 - y3;
Real_t dz43 = z4 - z3;
Real_t dx57 = x5 - x7;
Real_t dy57 = y5 - y7;
Real_t dz57 = z5 - z7;
Real_t dx14 = x1 - x4;
Real_t dy14 = y1 - y4;
Real_t dz14 = z1 - z4;
Real_t dx25 = x2 - x5;
Real_t dy25 = y2 - y5;
Real_t dz25 = z2 - z5;
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
Real_t volume =
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
dy31 + dy72, dy63, dy20,
dz31 + dz72, dz63, dz20) +
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
dy43 + dy57, dy64, dy70,
dz43 + dz57, dz64, dz70) +
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
dy14 + dy25, dy61, dy50,
dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
volume *= twelveth;
return volume ;
}
/* defined again outside because you can not define macros within macros */
/* Expands to a 3x3 determinant: the scalar triple product of three
 * vectors whose components are passed interleaved by coordinate
 * (x-components first, then y, then z).  Twelve times the hexahedron
 * volume is a sum of three such products (see CalcElemVolumeStatic). */
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
//static inline
/*
 * Macro clone of CalcElemVolumeStatic, provided as a macro so it can be
 * expanded inline inside OpenACC device loops.  Takes the eight corner
 * coordinates of a hexahedron and ASSIGNS the result to a variable
 * named `volume` which must be declared in the enclosing scope.
 * Uses the file-scope TRIPLE_PRODUCT macro (defined just above).
 */
#define CalcElemVolume_Full(x0, x1, \
x2, x3, \
x4, x5, \
x6, x7, \
y0, y1, \
y2, y3, \
y4, y5, \
y6, y7, \
z0, z1, \
z2, z3, \
z4, z5, \
z6, z7) \
do { \
Real_t twelveth = (Real_t)(1.0)/(Real_t)(12.0); \
\
Real_t dx61 = (x6) - (x1); \
Real_t dy61 = (y6) - (y1); \
Real_t dz61 = (z6) - (z1); \
\
Real_t dx70 = (x7) - (x0); \
Real_t dy70 = (y7) - (y0); \
Real_t dz70 = (z7) - (z0); \
\
Real_t dx63 = (x6) - (x3); \
Real_t dy63 = (y6) - (y3); \
Real_t dz63 = (z6) - (z3); \
\
Real_t dx20 = (x2) - (x0); \
Real_t dy20 = (y2) - (y0); \
Real_t dz20 = (z2) - (z0); \
\
Real_t dx50 = (x5) - (x0); \
Real_t dy50 = (y5) - (y0); \
Real_t dz50 = (z5) - (z0); \
\
Real_t dx64 = (x6) - (x4); \
Real_t dy64 = (y6) - (y4); \
Real_t dz64 = (z6) - (z4); \
\
Real_t dx31 = (x3) - (x1); \
Real_t dy31 = (y3) - (y1); \
Real_t dz31 = (z3) - (z1); \
\
Real_t dx72 = (x7) - (x2); \
Real_t dy72 = (y7) - (y2); \
Real_t dz72 = (z7) - (z2); \
\
Real_t dx43 = (x4) - (x3); \
Real_t dy43 = (y4) - (y3); \
Real_t dz43 = (z4) - (z3); \
\
Real_t dx57 = (x5) - (x7); \
Real_t dy57 = (y5) - (y7); \
Real_t dz57 = (z5) - (z7); \
\
Real_t dx14 = (x1) - (x4); \
Real_t dy14 = (y1) - (y4); \
Real_t dz14 = (z1) - (z4); \
\
Real_t dx25 = (x2) - (x5); \
Real_t dy25 = (y2) - (y5); \
Real_t dz25 = (z2) - (z5); \
\
volume = \
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20, \
dy31 + dy72, dy63, dy20, \
dz31 + dz72, dz63, dz20) + \
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70, \
dy43 + dy57, dy64, dy70, \
dz43 + dz57, dz64, dz70) + \
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50, \
dy14 + dy25, dy61, dy50, \
dz14 + dz25, dz61, dz50); \
\
volume *= twelveth; \
} while(0)
/******************************************/
//inline
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{
   /* Array-argument wrapper: unpack the eight hexahedron corner
      coordinates and forward to the scalar-argument implementation. */
   return CalcElemVolumeStatic( x[0], x[1], x[2], x[3],
                                x[4], x[5], x[6], x[7],
                                y[0], y[1], y[2], y[3],
                                y[4], y[5], y[6], y[7],
                                z[0], z[1], z[2], z[3],
                                z[4], z[5], z[6], z[7] );
}
//static inline
/*
 * Volume of a hexahedron whose corner coordinates are held in val8
 * structs (fields .v0 .. .v7).  Expands CalcElemVolume_Full and thus
 * ASSIGNS to a variable named `volume` in the enclosing scope.
 */
#define CalcElemVolume_macro(x,y,z) \
do { \
CalcElemVolume_Full((x.v0), (x.v1), (x.v2), (x.v3), (x.v4), (x.v5), (x.v6), (x.v7), \
(y.v0), (y.v1), (y.v2), (y.v3), (y.v4), (y.v5), (y.v6), (y.v7), \
(z.v0), (z.v1), (z.v2), (z.v3), (z.v4), (z.v5), (z.v6), (z.v7)); \
} while(0)
/******************************************/
//static inline
#define AreaFace_macro(x0, x1, \
x2, x3, \
y0, y1, \
y2, y3, \
z0, z1, \
z2, z3) \
do { \
Real_t fx = (x2 - x0) - (x3 - x1); \
Real_t fy = (y2 - y0) - (y3 - y1); \
Real_t fz = (z2 - z0) - (z3 - z1); \
Real_t gx = (x2 - x0) + (x3 - x1); \
Real_t gy = (y2 - y0) + (y3 - y1); \
Real_t gz = (z2 - z0) + (z3 - z1); \
a = \
(fx * fx + fy * fy + fz * fz) * \
(gx * gx + gy * gy + gz * gz) - \
(fx * gx + fy * gy + fz * gz) * \
(fx * gx + fy * gy + fz * gz); \
} while(0)
/******************************************/
//static inline
#define CalcElemCharacteristicLength_macro(x, \
y, \
z, \
volume) \
do { \
charLength = (Real_t)(0.0); \
Real_t a; \
\
AreaFace_macro(x.v0,x.v1,x.v2,x.v3, \
y.v0,y.v1,y.v2,y.v3, \
z.v0,z.v1,z.v2,z.v3) ; \
charLength = MAX(a,charLength) ; \
\
AreaFace_macro(x.v4,x.v5,x.v6,x.v7, \
y.v4,y.v5,y.v6,y.v7, \
z.v4,z.v5,z.v6,z.v7) ; \
charLength = MAX(a,charLength) ; \
\
AreaFace_macro(x.v0,x.v1,x.v5,x.v4, \
y.v0,y.v1,y.v5,y.v4, \
z.v0,z.v1,z.v5,z.v4) ; \
charLength = MAX(a,charLength) ; \
\
AreaFace_macro(x.v1,x.v2,x.v6,x.v5, \
y.v1,y.v2,y.v6,y.v5, \
z.v1,z.v2,z.v6,z.v5) ; \
charLength = MAX(a,charLength) ; \
\
AreaFace_macro(x.v2,x.v3,x.v7,x.v6, \
y.v2,y.v3,y.v7,y.v6, \
z.v2,z.v3,z.v7,z.v6) ; \
charLength = MAX(a,charLength) ; \
\
AreaFace_macro(x.v3,x.v0,x.v4,x.v7, \
y.v3,y.v0,y.v4,y.v7, \
z.v3,z.v0,z.v4,z.v7) ; \
charLength = MAX(a,charLength) ; \
\
charLength = (Real_t)(4.0) * volume / sqrt(charLength); \
} while(0)
/******************************************/
//static inline
#define CalcElemVelocityGradient_macro(xvel, \
yvel, \
zvel, \
b, \
detJ, \
d) \
do { \
const Real_t inv_detJ = (Real_t)(1.0) / detJ ; \
Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz; \
\
d.v0 = inv_detJ * ( b.v0_0 * (xvel.v0-xvel.v6) \
+ b.v0_1 * (xvel.v1-xvel.v7) \
+ b.v0_2 * (xvel.v2-xvel.v4) \
+ b.v0_3 * (xvel.v3-xvel.v5) ); \
\
d.v1 = inv_detJ * ( b.v1_0 * (yvel.v0-yvel.v6) \
+ b.v1_1 * (yvel.v1-yvel.v7) \
+ b.v1_2 * (yvel.v2-yvel.v4) \
+ b.v1_3 * (yvel.v3-yvel.v5) ); \
\
d.v2 = inv_detJ * ( b.v2_0 * (zvel.v0-zvel.v6) \
+ b.v2_1 * (zvel.v1-zvel.v7) \
+ b.v2_2 * (zvel.v2-zvel.v4) \
+ b.v2_3 * (zvel.v3-zvel.v5) ); \
\
dyddx = inv_detJ * ( b.v0_0 * (yvel.v0-yvel.v6) \
+ b.v0_1 * (yvel.v1-yvel.v7) \
+ b.v0_2 * (yvel.v2-yvel.v4) \
+ b.v0_3 * (yvel.v3-yvel.v5) ); \
\
dxddy = inv_detJ * ( b.v1_0 * (xvel.v0-xvel.v6) \
+ b.v1_1 * (xvel.v1-xvel.v7) \
+ b.v1_2 * (xvel.v2-xvel.v4) \
+ b.v1_3 * (xvel.v3-xvel.v5) ); \
\
dzddx = inv_detJ * ( b.v0_0 * (zvel.v0-zvel.v6) \
+ b.v0_1 * (zvel.v1-zvel.v7) \
+ b.v0_2 * (zvel.v2-zvel.v4) \
+ b.v0_3 * (zvel.v3-zvel.v5) ); \
\
dxddz = inv_detJ * ( b.v2_0 * (xvel.v0-xvel.v6) \
+ b.v2_1 * (xvel.v1-xvel.v7) \
+ b.v2_2 * (xvel.v2-xvel.v4) \
+ b.v2_3 * (xvel.v3-xvel.v5) ); \
\
dzddy = inv_detJ * ( b.v1_0 * (zvel.v0-zvel.v6) \
+ b.v1_1 * (zvel.v1-zvel.v7) \
+ b.v1_2 * (zvel.v2-zvel.v4) \
+ b.v1_3 * (zvel.v3-zvel.v5) ); \
\
dyddz = inv_detJ * ( b.v2_0 * (yvel.v0-yvel.v6) \
+ b.v2_1 * (yvel.v1-yvel.v7) \
+ b.v2_2 * (yvel.v2-yvel.v4) \
+ b.v2_3 * (yvel.v3-yvel.v5) ); \
d.v5 = (Real_t)( .5) * ( dxddy + dyddx ); \
d.v4 = (Real_t)( .5) * ( dxddz + dzddx ); \
d.v3 = (Real_t)( .5) * ( dzddy + dyddz ); \
} while(0)
/******************************************/
//static inline
/*
 * CalcKinematicsForElems: per-element kinematics for one timestep.
 * For each element:
 *   1. gather the eight nodal coordinates,
 *   2. compute the element volume, the new relative volume vnew[k] and
 *      its change delv[k],
 *   3. compute the characteristic length arealg[k],
 *   4. gather nodal velocities, step the coordinates back by half a
 *      timestep, evaluate shape-function derivatives there, and
 *   5. store the diagonal strain-rate components dxx/dyy/dzz.
 *
 * nodelist       : element->node connectivity, 8 entries per element
 * x,y,z          : nodal coordinates;  xd,yd,zd : nodal velocities
 * dxx,dyy,dzz    : out, diagonal velocity-gradient components
 * v, volo        : previous relative volume and reference volume
 * vnew,delv,arealg : out, new rel. volume, volume change, char. length
 * deltaTime      : timestep; numElem/numNode size the pragma clauses
 */
void CalcKinematicsForElems( Index_t *nodelist,
Real_t *x, Real_t *y, Real_t *z,
Real_t *xd, Real_t *yd, Real_t *zd,
Real_t *dxx, Real_t *dyy, Real_t *dzz,
Real_t *v, Real_t *volo,
Real_t *vnew, Real_t *delv, Real_t *arealg,
Real_t deltaTime, Index_t numElem, Index_t numNode)
{
/* NOTE(review): volatile appears to force the extent value to be
   materialized for the OpenACC data clauses below -- confirm against
   the PGI workaround notes elsewhere in this port. */
volatile Index_t numElem8 = numElem * 8;
Index_t k;
// loop over all elements
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(dxx[numElem], \
dyy[numElem], \
dzz[numElem], \
x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode], \
v[numElem], \
volo[numElem], \
vnew[numElem], \
delv[numElem], \
arealg[numElem], \
nodelist[numElem8]) async(0)
#else
#pragma acc parallel loop present(dxx[numElem], \
dyy[numElem], \
dzz[numElem], \
x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode], \
v[numElem], \
volo[numElem], \
vnew[numElem], \
delv[numElem], \
arealg[numElem], \
nodelist[numElem8])
#endif
#else
#pragma omp parallel for firstprivate(numElem, deltaTime)
#endif
for( k=0 ; k<numElem ; ++k )
{
bmat B ; /** shape function derivatives */
val6 D ;
val8 x_local ;
val8 y_local ;
val8 z_local ;
val8 xd_local ;
val8 yd_local ;
val8 zd_local ;
Real_t detJ = (Real_t)(0.0) ;
Real_t volume ;
Real_t relativeVolume ;
const Index_t* const elemToNode = &nodelist[8*k] ;
// get nodal coordinates from global arrays and copy into local arrays.
// Loop unrolled because the PGI OpenACC implementation currently stores
// locally-defined arrays in a global, shared context. Thus we have to use
// scalars instead to get them in registers.
Index_t gnode;
gnode = elemToNode[0];
x_local.v0 = x[gnode];
y_local.v0 = y[gnode];
z_local.v0 = z[gnode];
gnode = elemToNode[1];
x_local.v1 = x[gnode];
y_local.v1 = y[gnode];
z_local.v1 = z[gnode];
gnode = elemToNode[2];
x_local.v2 = x[gnode];
y_local.v2 = y[gnode];
z_local.v2 = z[gnode];
gnode = elemToNode[3];
x_local.v3 = x[gnode];
y_local.v3 = y[gnode];
z_local.v3 = z[gnode];
gnode = elemToNode[4];
x_local.v4 = x[gnode];
y_local.v4 = y[gnode];
z_local.v4 = z[gnode];
gnode = elemToNode[5];
x_local.v5 = x[gnode];
y_local.v5 = y[gnode];
z_local.v5 = z[gnode];
gnode = elemToNode[6];
x_local.v6 = x[gnode];
y_local.v6 = y[gnode];
z_local.v6 = z[gnode];
gnode = elemToNode[7];
x_local.v7 = x[gnode];
y_local.v7 = y[gnode];
z_local.v7 = z[gnode];
// volume calculations - CalcElemVolume is a macro that sets volume
CalcElemVolume_macro(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
vnew[k] = relativeVolume ;
delv[k] = relativeVolume - v[k] ;
// set characteristic length
Real_t charLength;
CalcElemCharacteristicLength_macro(x_local, y_local, z_local,
volume);
arealg[k] = charLength;
// get nodal velocities from global array and copy into local arrays.
gnode = elemToNode[0];
xd_local.v0 = xd[gnode];
yd_local.v0 = yd[gnode];
zd_local.v0 = zd[gnode];
gnode = elemToNode[1];
xd_local.v1 = xd[gnode];
yd_local.v1 = yd[gnode];
zd_local.v1 = zd[gnode];
gnode = elemToNode[2];
xd_local.v2 = xd[gnode];
yd_local.v2 = yd[gnode];
zd_local.v2 = zd[gnode];
gnode = elemToNode[3];
xd_local.v3 = xd[gnode];
yd_local.v3 = yd[gnode];
zd_local.v3 = zd[gnode];
gnode = elemToNode[4];
xd_local.v4 = xd[gnode];
yd_local.v4 = yd[gnode];
zd_local.v4 = zd[gnode];
gnode = elemToNode[5];
xd_local.v5 = xd[gnode];
yd_local.v5 = yd[gnode];
zd_local.v5 = zd[gnode];
gnode = elemToNode[6];
xd_local.v6 = xd[gnode];
yd_local.v6 = yd[gnode];
zd_local.v6 = zd[gnode];
gnode = elemToNode[7];
xd_local.v7 = xd[gnode];
yd_local.v7 = yd[gnode];
zd_local.v7 = zd[gnode];
// step the coordinates back by half a timestep so the velocity
// gradient is evaluated at the midpoint configuration
Real_t dt2 = (Real_t)(0.5) * deltaTime;
x_local.v0 -= dt2 * xd_local.v0;
y_local.v0 -= dt2 * yd_local.v0;
z_local.v0 -= dt2 * zd_local.v0;
x_local.v1 -= dt2 * xd_local.v1;
y_local.v1 -= dt2 * yd_local.v1;
z_local.v1 -= dt2 * zd_local.v1;
x_local.v2 -= dt2 * xd_local.v2;
y_local.v2 -= dt2 * yd_local.v2;
z_local.v2 -= dt2 * zd_local.v2;
x_local.v3 -= dt2 * xd_local.v3;
y_local.v3 -= dt2 * yd_local.v3;
z_local.v3 -= dt2 * zd_local.v3;
x_local.v4 -= dt2 * xd_local.v4;
y_local.v4 -= dt2 * yd_local.v4;
z_local.v4 -= dt2 * zd_local.v4;
x_local.v5 -= dt2 * xd_local.v5;
y_local.v5 -= dt2 * yd_local.v5;
z_local.v5 -= dt2 * zd_local.v5;
x_local.v6 -= dt2 * xd_local.v6;
y_local.v6 -= dt2 * yd_local.v6;
z_local.v6 -= dt2 * zd_local.v6;
x_local.v7 -= dt2 * xd_local.v7;
y_local.v7 -= dt2 * yd_local.v7;
z_local.v7 -= dt2 * zd_local.v7;
CalcElemShapeFunctionDerivatives_unrolled( x_local, y_local, z_local,
B, detJ );
CalcElemVelocityGradient_macro( xd_local, yd_local, zd_local,
B, detJ, D );
// put velocity gradient quantities into their global arrays.
dxx[k] = D.v0;
dyy[k] = D.v1;
dzz[k] = D.v2;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
/*
 * CalcLagrangeElements: element portion of the Lagrange leapfrog step.
 * Runs CalcKinematicsForElems to obtain strain rates and new volumes,
 * then, per element: stores the volumetric rate vdov = dxx+dyy+dzz,
 * makes the strain-rate components deviatoric by subtracting vdov/3,
 * and aborts the whole run (VolumeError) if any new relative volume is
 * non-positive.  vnew: per-element new relative volumes (in/out on the
 * device).
 */
static inline
void CalcLagrangeElements(Real_t* vnew)
{
Index_t numElem = m_numElem ;
Index_t numNode = m_numNode ;
if (numElem > 0) {
const Real_t deltatime = m_deltatime ;
// strains are now allocated at startup to prevent unnecessary mem transfers
Real_t *dxx = m_dxx;
Real_t *dyy = m_dyy;
Real_t *dzz = m_dzz;
Real_t *x = m_x;
Real_t *y = m_y;
Real_t *z = m_z;
Real_t *xd = m_xd;
Real_t *yd = m_yd;
Real_t *zd = m_zd;
Real_t *v = m_v;
Real_t *volo = m_volo;
Real_t *vdov = m_vdov;
Real_t *delv = m_delv;
Real_t *arealg = m_arealg;
Index_t *nodelist = m_nodelist;
CalcKinematicsForElems(nodelist,
x, y, z,
xd, yd, zd,
dxx, dyy, dzz,
v, volo,
vnew, delv, arealg,
deltatime, numElem, numNode);
// element loop to do some stuff not included in the elemlib function.
// abort is an error flag combined across iterations with a max
// reduction so the check can run inside the parallel loop.
int abort = 0;
Index_t k;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vdov[numElem], \
dxx[numElem], \
dyy[numElem], \
dzz[numElem], \
vnew[numElem]) \
reduction(max: abort) async(0)
#else
#pragma acc parallel loop present(vdov[numElem], \
dxx[numElem], \
dyy[numElem], \
dzz[numElem], \
vnew[numElem]) \
reduction(max: abort)
#endif
#else
#pragma omp parallel for firstprivate(numElem) reduction(max: abort)
#endif
for ( k=0 ; k<numElem ; ++k )
{
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdov_k = dxx[k] + dyy[k] + dzz[k] ;
Real_t vdovthird = vdov_k/(Real_t)(3.0) ;
// make the rate of deformation tensor deviatoric
vdov[k] = vdov_k ;
dxx[k] -= vdovthird ;
dyy[k] -= vdovthird ;
dzz[k] -= vdovthird ;
// See if any volumes are negative, and take appropriate action.
if (vnew[k] <= (Real_t)(0.0))
{
abort = 1;
}
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
if(abort) {
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
#endif
}
} // end if numElem > 0
}
/******************************************/
/*
 * CalcMonotonicQGradientsForElems: per-element position and velocity
 * gradients in the three logical mesh directions (xi, eta, zeta), used
 * afterwards by the monotonic artificial-viscosity limiter.
 * For each element it forms face-centroid difference vectors (di, dj,
 * dk), and from cross products of those writes:
 *   delx_* : a directional length scale  vol / |cross product|
 *   delv_* : the velocity gradient along that direction
 * vnew: new relative volumes; allElem: gradient-array extent including
 * the ghost elements filled later by communication.
 */
static inline
void CalcMonotonicQGradientsForElems(Real_t vnew[], Index_t allElem)
{
/* NOTE(review): volatile on the extents appears to be a workaround so
   the OpenACC data clauses see concrete values -- confirm. */
volatile Index_t numNode = m_numNode;
Index_t numElem = m_numElem;
volatile Int_t numElem8 = m_numElem * 8;
Real_t *x = m_x;
Real_t *y = m_y;
Real_t *z = m_z;
Real_t *xd = m_xd;
Real_t *yd = m_yd;
Real_t *zd = m_zd;
Real_t *volo = m_volo;
Index_t *nodelist = m_nodelist;
Real_t *delv_xi = m_delv_xi;
Real_t *delv_eta = m_delv_eta;
Real_t *delv_zeta = m_delv_zeta;
Real_t *delx_xi = m_delx_xi;
Real_t *delx_eta = m_delx_eta;
Real_t *delx_zeta = m_delx_zeta;
Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vnew[numElem], \
nodelist[numElem8], \
x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode], \
volo[numElem], \
delx_xi[allElem], \
delx_eta[allElem], \
delx_zeta[allElem], \
delv_xi[allElem], \
delv_eta[allElem], \
delv_zeta[allElem]) async(0)
#else
#pragma acc parallel loop present(vnew[numElem], \
nodelist[numElem8], \
x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode], \
volo[numElem], \
delx_xi[allElem], \
delx_eta[allElem], \
delx_zeta[allElem], \
delv_xi[allElem], \
delv_eta[allElem], \
delv_zeta[allElem])
#endif
#else
#pragma omp parallel for firstprivate(numElem)
#endif
for (i = 0 ; i < numElem ; ++i ) {
const Real_t ptiny = (Real_t)(1.e-36) ;
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
// gather the element's eight node indices, coordinates and velocities
const Index_t *elemToNode = &nodelist[i*8];
Index_t n0 = elemToNode[0] ;
Index_t n1 = elemToNode[1] ;
Index_t n2 = elemToNode[2] ;
Index_t n3 = elemToNode[3] ;
Index_t n4 = elemToNode[4] ;
Index_t n5 = elemToNode[5] ;
Index_t n6 = elemToNode[6] ;
Index_t n7 = elemToNode[7] ;
Real_t x0 = x[n0] ;
Real_t x1 = x[n1] ;
Real_t x2 = x[n2] ;
Real_t x3 = x[n3] ;
Real_t x4 = x[n4] ;
Real_t x5 = x[n5] ;
Real_t x6 = x[n6] ;
Real_t x7 = x[n7] ;
Real_t y0 = y[n0] ;
Real_t y1 = y[n1] ;
Real_t y2 = y[n2] ;
Real_t y3 = y[n3] ;
Real_t y4 = y[n4] ;
Real_t y5 = y[n5] ;
Real_t y6 = y[n6] ;
Real_t y7 = y[n7] ;
Real_t z0 = z[n0] ;
Real_t z1 = z[n1] ;
Real_t z2 = z[n2] ;
Real_t z3 = z[n3] ;
Real_t z4 = z[n4] ;
Real_t z5 = z[n5] ;
Real_t z6 = z[n6] ;
Real_t z7 = z[n7] ;
Real_t xv0 = xd[n0] ;
Real_t xv1 = xd[n1] ;
Real_t xv2 = xd[n2] ;
Real_t xv3 = xd[n3] ;
Real_t xv4 = xd[n4] ;
Real_t xv5 = xd[n5] ;
Real_t xv6 = xd[n6] ;
Real_t xv7 = xd[n7] ;
Real_t yv0 = yd[n0] ;
Real_t yv1 = yd[n1] ;
Real_t yv2 = yd[n2] ;
Real_t yv3 = yd[n3] ;
Real_t yv4 = yd[n4] ;
Real_t yv5 = yd[n5] ;
Real_t yv6 = yd[n6] ;
Real_t yv7 = yd[n7] ;
Real_t zv0 = zd[n0] ;
Real_t zv1 = zd[n1] ;
Real_t zv2 = zd[n2] ;
Real_t zv3 = zd[n3] ;
Real_t zv4 = zd[n4] ;
Real_t zv5 = zd[n5] ;
Real_t zv6 = zd[n6] ;
Real_t zv7 = zd[n7] ;
Real_t vol = volo[i]*vnew[i] ;
Real_t norm = (Real_t)(1.0) / ( vol + ptiny ) ;
// face-centroid difference vectors along the three logical directions
Real_t dxj = (Real_t)(-0.25)*((x0+x1+x5+x4) - (x3+x2+x6+x7)) ;
Real_t dyj = (Real_t)(-0.25)*((y0+y1+y5+y4) - (y3+y2+y6+y7)) ;
Real_t dzj = (Real_t)(-0.25)*((z0+z1+z5+z4) - (z3+z2+z6+z7)) ;
Real_t dxi = (Real_t)( 0.25)*((x1+x2+x6+x5) - (x0+x3+x7+x4)) ;
Real_t dyi = (Real_t)( 0.25)*((y1+y2+y6+y5) - (y0+y3+y7+y4)) ;
Real_t dzi = (Real_t)( 0.25)*((z1+z2+z6+z5) - (z0+z3+z7+z4)) ;
Real_t dxk = (Real_t)( 0.25)*((x4+x5+x6+x7) - (x0+x1+x2+x3)) ;
Real_t dyk = (Real_t)( 0.25)*((y4+y5+y6+y7) - (y0+y1+y2+y3)) ;
Real_t dzk = (Real_t)( 0.25)*((z4+z5+z6+z7) - (z0+z1+z2+z3)) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
delx_zeta[i] = vol / sqrt(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = (Real_t)(0.25)*((xv4+xv5+xv6+xv7) - (xv0+xv1+xv2+xv3)) ;
dyv = (Real_t)(0.25)*((yv4+yv5+yv6+yv7) - (yv0+yv1+yv2+yv3)) ;
dzv = (Real_t)(0.25)*((zv4+zv5+zv6+zv7) - (zv0+zv1+zv2+zv3)) ;
delv_zeta[i] = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
delx_xi[i] = vol / sqrt(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = (Real_t)(0.25)*((xv1+xv2+xv6+xv5) - (xv0+xv3+xv7+xv4)) ;
dyv = (Real_t)(0.25)*((yv1+yv2+yv6+yv5) - (yv0+yv3+yv7+yv4)) ;
dzv = (Real_t)(0.25)*((zv1+zv2+zv6+zv5) - (zv0+zv3+zv7+zv4)) ;
delv_xi[i] = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
delx_eta[i] = vol / sqrt(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = (Real_t)(-0.25)*((xv0+xv1+xv5+xv4) - (xv3+xv2+xv6+xv7)) ;
dyv = (Real_t)(-0.25)*((yv0+yv1+yv5+yv4) - (yv3+yv2+yv6+yv7)) ;
dzv = (Real_t)(-0.25)*((zv0+zv1+zv5+zv4) - (zv3+zv2+zv6+zv7)) ;
delv_eta[i] = ax*dxv + ay*dyv + az*dzv ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
/*
 * NOTES: This function uses several goto statements. They are used in the
 * place of breaks. This is the result of a bug in the PGI compiler (version
 * (13.4-accelerator) in which breaks inside of switches jump out of the omp
 * loops they are placed in. We decided that using gotos is a more readable
 * alternative than rewriting them all to if-else blocks.
 */
/*
 * CalcMonotonicQRegionForElems: monotonic artificial viscosity for the
 * elements of region r.  For each element a limiter phi in [0,
 * monoq_max_slope] is built per direction (xi, eta, zeta) from the
 * element's own velocity gradient and those of its two neighbors in
 * that direction; the boundary-condition mask selects the neighbor
 * gradient (interior/comm, symmetry = own value, free surface = 0).
 * Elements under compression (vdov <= 0) then get linear (ql) and
 * quadratic (qq) viscosity coefficients; expanding elements get zero.
 * vnew: new relative volumes; ptiny: divide-by-zero guard; allElem:
 * gradient-array extent including ghost elements.
 */
static inline
void CalcMonotonicQRegionForElems(Int_t r,
Real_t vnew[], Real_t ptiny, Index_t allElem)
{
Real_t monoq_limiter_mult = m_monoq_limiter_mult;
Real_t monoq_max_slope = m_monoq_max_slope;
Real_t qlc_monoq = m_qlc_monoq;
Real_t qqc_monoq = m_qqc_monoq;
/* l{xi,eta,zeta}{m,p}: neighbor element indices in the minus/plus
   direction along each logical axis */
Index_t *lxim = m_lxim;
Index_t *lxip = m_lxip;
Index_t *letam = m_letam;
Index_t *letap = m_letap;
Index_t *lzetam = m_lzetam;
Index_t *lzetap = m_lzetap;
Real_t *delv_xi = m_delv_xi;
Real_t *delv_eta = m_delv_eta;
Real_t *delv_zeta = m_delv_zeta;
Real_t *delx_xi = m_delx_xi;
Real_t *delx_eta = m_delx_eta;
Real_t *delx_zeta = m_delx_zeta;
Real_t *qq = m_qq;
Real_t *ql = m_ql;
Real_t *elemMass = m_elemMass;
Real_t *volo = m_volo;
Real_t *vdov = m_vdov;
Index_t regElemSize = m_regElemSize[r];
Index_t *regElemlist = m_regElemlist[r];
volatile Index_t numElem = m_numElem;
Int_t *elemBC = m_elemBC;
Index_t ielem;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop firstprivate(qlc_monoq, qqc_monoq, \
monoq_limiter_mult, monoq_max_slope, \
ptiny) \
copyin(regElemlist[regElemSize]) \
present(vnew[numElem], \
vdov[numElem], \
delx_xi[allElem], \
delx_eta[allElem], \
delx_zeta[allElem], \
delv_xi[allElem], \
delv_eta[allElem], \
delv_zeta[allElem], \
elemMass[numElem], \
volo[numElem], \
lxip[numElem], \
lxim[numElem], \
letam[numElem], \
letap[numElem], \
lzetam[numElem], \
lzetap[numElem], \
ql[numElem], \
qq[numElem], \
elemBC[numElem]) async(0)
#else
#pragma acc parallel loop firstprivate(qlc_monoq, qqc_monoq, \
monoq_limiter_mult, monoq_max_slope, \
ptiny) \
copyin(regElemlist[regElemSize]) \
present(vnew[numElem], \
vdov[numElem], \
delx_xi[allElem], \
delx_eta[allElem], \
delx_zeta[allElem], \
delv_xi[allElem], \
delv_eta[allElem], \
delv_zeta[allElem], \
elemMass[numElem], \
volo[numElem], \
lxip[numElem], \
lxim[numElem], \
letam[numElem], \
letap[numElem], \
lzetam[numElem], \
lzetap[numElem], \
ql[numElem], \
qq[numElem], \
elemBC[numElem])
#endif
#else
#pragma omp parallel for firstprivate(qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)
#endif
for ( ielem = 0 ; ielem < regElemSize; ++ielem ) {
Index_t i = regElemlist[ielem];
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Int_t bcMask = elemBC[i];
Real_t delvm, delvp ;
/* phixi */
Real_t norm = (Real_t)(1.) / (delv_xi[i]+ ptiny ) ;
// pick neighbor gradients per the boundary mask (see NOTES on gotos)
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = delv_xi[lxim[i]]; goto BCMASK_AND_XI_M;
case XI_M_SYMM: delvm = delv_xi[i] ; goto BCMASK_AND_XI_M;
case XI_M_FREE: delvm = (Real_t)(0.0) ; goto BCMASK_AND_XI_M;
default: /* ERROR */ ; goto BCMASK_AND_XI_M;
}
BCMASK_AND_XI_M:
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = delv_xi[lxip[i]] ; goto BCMASK_AND_XI_P;
case XI_P_SYMM: delvp = delv_xi[i] ; goto BCMASK_AND_XI_P;
case XI_P_FREE: delvp = (Real_t)(0.0) ; goto BCMASK_AND_XI_P;
default: /* ERROR */ ; goto BCMASK_AND_XI_P;
}
BCMASK_AND_XI_P:
// limiter: average of neighbor ratios, clamped to [0, max_slope]
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = (Real_t)(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < (Real_t)(0.)) phixi = (Real_t)(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = (Real_t)(1.) / ( delv_eta[i] + ptiny ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = delv_eta[letam[i]] ; goto BCMASK_AND_ETA_M;
case ETA_M_SYMM: delvm = delv_eta[i] ; goto BCMASK_AND_ETA_M;
case ETA_M_FREE: delvm = (Real_t)(0.0) ; goto BCMASK_AND_ETA_M;
default: /* ERROR */ ; goto BCMASK_AND_ETA_M;
}
BCMASK_AND_ETA_M:
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = delv_eta[letap[i]] ; goto BCMASK_AND_ETA_P;
case ETA_P_SYMM: delvp = delv_eta[i] ; goto BCMASK_AND_ETA_P;
case ETA_P_FREE: delvp = (Real_t)(0.0) ; goto BCMASK_AND_ETA_P;
default: /* ERROR */ ; goto BCMASK_AND_ETA_P;
}
BCMASK_AND_ETA_P:
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = (Real_t)(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < (Real_t)(0.)) phieta = (Real_t)(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = (Real_t)(1.) / ( delv_zeta[i] + ptiny ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = delv_zeta[lzetam[i]] ; goto BCMASK_AND_ZETA_M;
case ZETA_M_SYMM: delvm = delv_zeta[i] ; goto BCMASK_AND_ZETA_M;
case ZETA_M_FREE: delvm = (Real_t)(0.0) ; goto BCMASK_AND_ZETA_M;
default: /* ERROR */ ; goto BCMASK_AND_ZETA_M;
}
BCMASK_AND_ZETA_M:
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = delv_zeta[lzetap[i]] ; goto BCMASK_AND_ZETA_P;
case ZETA_P_SYMM: delvp = delv_zeta[i] ; goto BCMASK_AND_ZETA_P;
case ZETA_P_FREE: delvp = (Real_t)(0.0) ; goto BCMASK_AND_ZETA_P;
default: /* ERROR */ ; goto BCMASK_AND_ZETA_P;
}
BCMASK_AND_ZETA_P:
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = (Real_t)(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < (Real_t)(0.)) phizeta = (Real_t)(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( vdov[i] > (Real_t)(0.) ) {
// expanding element: no artificial viscosity
qlin = (Real_t)(0.) ;
qquad = (Real_t)(0.) ;
}
else {
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
if ( delvxxi > (Real_t)(0.) ) delvxxi = (Real_t)(0.) ;
if ( delvxeta > (Real_t)(0.) ) delvxeta = (Real_t)(0.) ;
if ( delvxzeta > (Real_t)(0.) ) delvxzeta = (Real_t)(0.) ;
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * ((Real_t)(1.) - phixi) +
delvxeta * ((Real_t)(1.) - phieta) +
delvxzeta * ((Real_t)(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * ((Real_t)(1.) - phixi*phixi) +
delvxeta*delvxeta * ((Real_t)(1.) - phieta*phieta) +
delvxzeta*delvxzeta * ((Real_t)(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
static inline
/* Apply the monotonic q calculation region by region; regions that
 * contain no elements are skipped.  vnew: new relative volumes;
 * allElem: gradient-array extent including ghost elements. */
void CalcMonotonicQForElems(Real_t vnew[], Index_t allElem)
{
   /* guard against division by zero inside the region kernel */
   const Real_t ptiny = (Real_t)(1.e-36) ;
   Index_t region ;

   for (region = 0 ; region < m_numReg ; ++region) {
      if (m_regElemSize[region] <= 0)
         continue ;   /* empty region: nothing to do */
      CalcMonotonicQRegionForElems(region, vnew, ptiny, allElem) ;
   }
}
/******************************************/
/*
 * CalcQForElems: driver for the artificial-viscosity (q) phase.
 * Computes velocity gradients for all elements (including ghost-layer
 * extents), exchanges the delv_* gradients with neighbor ranks under
 * MPI, applies the monotonic q region by region, and finally aborts
 * the run (QStopError) if any element's q exceeds m_qstop.
 * vnew: per-element new relative volumes.
 */
static inline
void CalcQForElems(Real_t vnew[])
{
//
// MONOTONIC Q option
//
Index_t numElem = m_numElem ;
if (numElem != 0) {
int allElem = numElem + /* local elem */
2*m_sizeX*m_sizeY + /* plane ghosts */
2*m_sizeX*m_sizeZ + /* row ghosts */
2*m_sizeY*m_sizeZ ; /* col ghosts */
/* Gradients are allocated globally now to reduce memory transfers to
device */
//AllocateGradients(allElem);
#if USE_MPI
CommRecv(MSG_MONOQ, 3,
m_sizeX, m_sizeY, m_sizeZ,
true, true) ;
#endif
/* Calculate velocity gradients */
CalcMonotonicQGradientsForElems(vnew, allElem);
#if USE_MPI
Real_t *fieldData[3] ;
Real_t *delv_xi = m_delv_xi;
Real_t *delv_eta = m_delv_eta;
Real_t *delv_zeta = m_delv_zeta;
#pragma acc data present(delv_xi[allElem], \
delv_eta[allElem], \
delv_zeta[allElem])
{
/* gradients are computed on the device; bring them to the host for
   the MPI exchange */
#pragma acc update host(delv_xi[allElem], \
delv_eta[allElem], \
delv_zeta[allElem])
/* Transfer velocity gradients in the first order elements */
/* problem->commElements->Transfer(CommElements::monoQ) ; */
fieldData[0] = delv_xi;
fieldData[1] = delv_eta;
fieldData[2] = delv_zeta;
CommSend(MSG_MONOQ, 3, fieldData,
m_sizeX, m_sizeY, m_sizeZ,
true, true) ;
CommMonoQ() ;
} // end acc data
#endif
CalcMonotonicQForElems(vnew, allElem) ;
// Free up memory
//DeallocateGradients();
/* Don't allow excessive artificial viscosity */
/* NOTE(review): m_q is read here on the host; presumably kept current
   by an earlier update/unified memory -- confirm device/host coherence. */
Index_t idx = -1;
Index_t i;
for (i=0; i<numElem; ++i) {
if ( m_q[i] > m_qstop ) {
idx = i ;
break ;
}
}
if(idx >= 0) {
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, QStopError) ;
#else
exit(QStopError);
#endif
}
}
}
/******************************************/
static inline
/*
 * CalcPressureForElems: pressure update for one region's element list.
 * Writes the (constant 2/3) compressibility coefficients into bvc and
 * pbvc, then sets p_new = bvc * e_old, zeroed below the p_cut cutoff or
 * when the element's relative volume reaches eosvmax, and floored at
 * pmin.  length: region list size; regElemList: region element indices.
 */
void CalcPressureForElems(Real_t* p_new, Real_t* bvc,
Real_t* pbvc, Real_t* e_old,
Real_t* compression, Real_t *vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax,
Index_t length, Index_t *regElemList)
{
/* volatile so the extent value is materialized for the OpenACC
   present() clause below */
volatile Index_t numElem = m_numElem;
Index_t idx;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(regElemList[length], \
compression[length], \
pbvc[length], \
p_new[length], \
bvc[length], \
e_old[length], \
vnewc[numElem]) async(0)
#else
#pragma acc parallel loop present(regElemList[length], \
compression[length], \
pbvc[length], \
p_new[length], \
bvc[length], \
e_old[length], \
vnewc[numElem])
#endif
#else
#pragma omp parallel for firstprivate(length, pmin, p_cut, eosvmax)
#endif
for (idx = 0 ; idx < length ; ++idx) {
const Index_t elem = regElemList[idx];
/* fused loop: linear bulk coefficient is the constant 2/3 */
const Real_t c1s = (Real_t)(2.0)/(Real_t)(3.0) ;
bvc[idx] = c1s * (compression[idx] + (Real_t)(1.));
pbvc[idx] = c1s;
/* provisional pressure, then cutoffs and floor */
Real_t pressure = bvc[idx] * e_old[idx] ;
if (fabs(pressure) < p_cut )
pressure = (Real_t)(0.0) ;
if ( vnewc[elem] >= eosvmax ) /* impossible condition here? */
pressure = (Real_t)(0.0) ;
if (pressure < pmin)
pressure = pmin ;
p_new[idx] = pressure ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
static inline
void CalcEnergyForElems(Real_t* p_new, Real_t* e_new, Real_t* q_new,
Real_t* bvc, Real_t* pbvc,
Real_t* p_old, Real_t* e_old, Real_t* q_old,
Real_t* compression, Real_t* compHalfStep,
Real_t* vnewc, Real_t* work, Real_t* delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t* qq_old, Real_t* ql_old,
Real_t rho0,
Real_t eosvmax,
Int_t length, Index_t *regElemList)
{
#ifdef USE_UNIFIEDMEM
Real_t *pHalfStep = (Real_t*) acc_create_unified(NULL, sizeof(Real_t) * (length)) ;
#else
Real_t *pHalfStep = (Real_t*) malloc(sizeof(Real_t) * (length)) ;
#endif
Index_t i;
#ifdef _OPENACC
volatile Index_t numElem = m_numElem;
#pragma acc data create(pHalfStep[length])
{
#endif
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(e_new[length], \
e_old[length], \
p_old[length], \
q_old[length], \
delvc[length], \
work[length]) async(0)
#else
#pragma acc parallel loop present(e_new[length], \
e_old[length], \
p_old[length], \
q_old[length], \
delvc[length], \
work[length])
#endif
#else
#pragma omp parallel for firstprivate(length, emin)
#endif
for (i = 0 ; i < length ; ++i) {
e_new[i] = e_old[i] - (Real_t)(0.5) * delvc[i] * (p_old[i] + q_old[i])
+ (Real_t)(0.5) * work[i];
if (e_new[i] < emin ) {
e_new[i] = emin ;
}
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
CalcPressureForElems(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax, length, regElemList);
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(compHalfStep[length], \
pHalfStep[length], \
delvc[length], \
p_old[length], \
q_old[length], \
ql_old[length], \
qq_old[length], \
q_new[length], \
pbvc[length], \
bvc[length], \
e_new[length]) async(0)
#else
#pragma acc parallel loop present(compHalfStep[length], \
pHalfStep[length], \
delvc[length], \
p_old[length], \
q_old[length], \
ql_old[length], \
qq_old[length], \
q_new[length], \
pbvc[length], \
bvc[length], \
e_new[length])
#endif
#else
#pragma omp parallel for firstprivate(length, rho0)
#endif
for (i = 0 ; i < length ; ++i) {
Real_t vhalf = (Real_t)(1.) / ((Real_t)(1.) + compHalfStep[i]) ;
if ( delvc[i] > (Real_t)(0.) ) {
q_new[i] /* = qq_old[i] = ql_old[i] */ = (Real_t)(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;
if ( ssc <= (Real_t)(.1111111e-36) ) {
ssc = (Real_t)(.3333333e-18) ;
} else {
ssc = sqrt(ssc) ;
}
q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;
}
e_new[i] = e_new[i] + (Real_t)(0.5) * delvc[i]
* ( (Real_t)(3.0)*(p_old[i] + q_old[i])
- (Real_t)(4.0)*(pHalfStep[i] + q_new[i])) ;
}
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(e_new[length], \
work[length]) async(0)
#else
#pragma acc parallel loop present(e_new[length], \
work[length])
#endif
#else
#pragma omp parallel for firstprivate(length, emin, e_cut)
#endif
for (i = 0 ; i < length ; ++i) {
e_new[i] += (Real_t)(0.5) * work[i];
if (fabs(e_new[i]) < e_cut) {
e_new[i] = (Real_t)(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
CalcPressureForElems(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length, regElemList);
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(regElemList[length], \
pHalfStep[length], \
delvc[length], \
pbvc[length], \
e_new[length], \
bvc[length], \
ql_old[length], \
qq_old[length], \
p_old[length], \
q_old[length], \
p_new[length], \
q_new[length], \
vnewc[numElem]) async(0)
#else
#pragma acc parallel loop present(regElemList[length], \
pHalfStep[length], \
delvc[length], \
pbvc[length], \
e_new[length], \
bvc[length], \
ql_old[length], \
qq_old[length], \
p_old[length], \
q_old[length], \
p_new[length], \
q_new[length], \
vnewc[numElem])
#endif
#else
#pragma omp parallel for firstprivate(length, rho0, emin, e_cut)
#endif
for (i = 0 ; i < length ; ++i){
const Real_t sixth = (Real_t)(1.0) / (Real_t)(6.0) ;
Index_t elem = regElemList[i];
Real_t q_tilde ;
if (delvc[i] > (Real_t)(0.)) {
q_tilde = (Real_t)(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= (Real_t)(.1111111e-36) ) {
ssc = (Real_t)(.3333333e-18) ;
} else {
ssc = sqrt(ssc) ;
}
q_tilde = (ssc*ql_old[i] + qq_old[i]) ;
}
e_new[i] = e_new[i] - ( (Real_t)(7.0)*(p_old[i] + q_old[i])
- (Real_t)(8.0)*(pHalfStep[i] + q_new[i])
+ (p_new[i] + q_tilde)) * delvc[i]*sixth ;
if (fabs(e_new[i]) < e_cut) {
e_new[i] = (Real_t)(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
CalcPressureForElems(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length, regElemList);
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(regElemList[length], \
delvc[length], \
pbvc[length], \
e_new[length], \
vnewc[numElem], \
bvc[length], \
ql_old[length], \
qq_old[length], \
p_new[length], \
q_new[length]) async(0)
#else
#pragma acc parallel loop present(regElemList[length], \
delvc[length], \
pbvc[length], \
e_new[length], \
vnewc[numElem], \
bvc[length], \
ql_old[length], \
qq_old[length], \
p_new[length], \
q_new[length])
#endif
#else
#pragma omp parallel for firstprivate(length, rho0, q_cut)
#endif
for (i = 0 ; i < length ; ++i){
Index_t elem = regElemList[i];
if ( delvc[i] <= (Real_t)(0.) ) {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[elem] * vnewc[elem] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= (Real_t)(.1111111e-36) ) {
ssc = (Real_t)(.3333333e-18) ;
} else {
ssc = sqrt(ssc) ;
}
q_new[i] = (ssc*ql_old[i] + qq_old[i]) ;
if (fabs(q_new[i]) < q_cut) q_new[i] = (Real_t)(0.) ;
}
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
#ifdef _OPENACC
} // end acc data
#endif
#ifdef USE_UNIFIEDMEM
if (pHalfStep != NULL) acc_delete_unified(pHalfStep, 0);
#else
if (pHalfStep != NULL) free(pHalfStep);
#endif
return ;
}
/******************************************/
static inline
/* Recompute the per-element sound speed ss[] for the elements of one region.
   For each region entry i (mapped to a mesh element via regElemList), the
   squared sound speed is (pbvc*e + v^2*bvc*p)/rho0; tiny/non-positive values
   are clamped to a fixed floor before the square root.
   NOTE(review): ss4o3 is carried only in the firstprivate clauses and is not
   used in the loop body — presumably kept for interface symmetry; confirm. */
void CalcSoundSpeedForElems(Real_t *ss,
Real_t *vnewc, Real_t rho0, Real_t *enewc,
Real_t *pnewc, Real_t *pbvc,
Real_t *bvc, Real_t ss4o3,
Index_t numElem, Int_t len, Index_t *regElemList)
{
Index_t i;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vnewc[numElem], \
regElemList[len], \
pbvc[len], \
enewc[len], \
bvc[len], \
pnewc[len], \
ss[numElem]) \
firstprivate(rho0, ss4o3) async(0)
#else
#pragma acc parallel loop present(vnewc[numElem], \
regElemList[len], \
pbvc[len], \
enewc[len], \
bvc[len], \
pnewc[len], \
ss[numElem]) \
firstprivate(rho0, ss4o3)
#endif
#else
#pragma omp parallel for firstprivate(rho0, ss4o3)
#endif
for (i = 0; i < len ; ++i) {
int elem = regElemList[i];
/* region-local arrays (pbvc, enewc, ...) are indexed by i;
   mesh-sized arrays (vnewc, ss) by the mapped element index */
Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[elem] * vnewc[elem] *
bvc[i] * pnewc[i]) / rho0;
if (ssTmp <= (Real_t)(.1111111e-36)) {
/* floor the sound speed so later divisions by ss stay finite */
ssTmp = (Real_t)(.3333333e-18);
}
else {
ssTmp = sqrt(ssTmp);
}
ss[elem] = ssTmp ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
}
/******************************************/
static inline
/* Evaluate the equation of state for one material region.
   Gathers the region's element data into compacted scratch arrays
   (e_old, delvc, ...), computes compression terms, calls
   CalcEnergyForElems, and scatters p/e/q back to the mesh arrays.
   The work is repeated `rep` times to model per-region load imbalance.
   FIX(review): the fused-loop present() clauses previously listed
   compHalfStep[numElemReg] twice; the duplicate entry is removed
   (some OpenACC compilers reject duplicate vars in a data clause). */
void EvalEOSForElems(Real_t *vnewc,
Int_t numElemReg, Index_t *regElemList, Int_t rep)
{
/* Snapshot material constants and scratch-array pointers from file-scope
   state so the pragmas below can reference plain local names. */
Real_t e_cut = m_e_cut ;
Real_t p_cut = m_p_cut ;
Real_t ss4o3 = m_ss4o3 ;
Real_t q_cut = m_q_cut ;
Real_t eosvmax = m_eosvmax ;
Real_t eosvmin = m_eosvmin ;
Real_t pmin = m_pmin ;
Real_t emin = m_emin ;
Real_t rho0 = m_refdens ;
Real_t *e_old = m_e_old ;
Real_t *delvc = m_delvc ;
Real_t *p_old = m_p_old ;
Real_t *q_old = m_q_old ;
Real_t *compression = m_compression ;
Real_t *compHalfStep = m_compHalfStep ;
Real_t *qq_old = m_qq_old ;
Real_t *ql_old = m_ql_old ;
Real_t *work = m_work ;
Real_t *p_new = m_p_new ;
Real_t *e_new = m_e_new ;
Real_t *q_new = m_q_new ;
Real_t *bvc = m_bvc ;
Real_t *pbvc = m_pbvc ;
Real_t *e = m_e;
Real_t *delv = m_delv;
Real_t *p = m_p;
Real_t *q = m_q;
Real_t *qq = m_qq;
Real_t *ql = m_ql;
Index_t numElem = m_numElem;
#ifdef _OPENACC
#pragma acc data present(e_old[numElemReg], \
delvc[numElemReg], \
p_old[numElemReg], \
q_old[numElemReg], \
compression[numElemReg], \
compHalfStep[numElemReg], \
qq_old[numElemReg], \
ql_old[numElemReg], \
work[numElemReg], \
p_new[numElemReg], \
e_new[numElemReg], \
q_new[numElemReg], \
bvc[numElemReg], \
pbvc[numElemReg]) \
copyin(regElemList[numElemReg])
#endif
{ // acc data brace
Index_t i;
Int_t j;
//loop to add load imbalance based on region number
for(j = 0; j < rep; j++) {
/* compress data, minimal set: gather mesh values into region order */
#ifndef _OPENACC
#pragma omp parallel
#endif
//{ // omp parallel brace
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(e_old[numElemReg], \
delvc[numElemReg], \
p_old[numElemReg], \
q_old[numElemReg], \
regElemList[numElemReg], \
qq_old[numElemReg], \
ql_old[numElemReg], \
p[numElem], \
e[numElem], \
q[numElem], \
delv[numElem], \
qq[numElem], \
ql[numElem]) async(0)
#else
#pragma acc parallel loop present(e_old[numElemReg], \
delvc[numElemReg], \
p_old[numElemReg], \
q_old[numElemReg], \
regElemList[numElemReg], \
qq_old[numElemReg], \
ql_old[numElemReg], \
p[numElem], \
e[numElem], \
q[numElem], \
delv[numElem], \
qq[numElem], \
ql[numElem])
#endif
#else
#pragma omp for nowait firstprivate(numElemReg)
#endif
for (i=0; i<numElemReg; ++i) {
int elem = regElemList[i];
e_old[i] = e[elem] ;
delvc[i] = delv[elem] ;
p_old[i] = p[elem] ;
q_old[i] = q[elem] ;
qq_old[i] = qq[elem] ;
ql_old[i] = ql[elem] ;
}
/* full-step and half-step compressions: 1/v - 1 */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vnewc[numElem], \
compression[numElemReg], \
delvc[numElemReg], \
compHalfStep[numElemReg], \
regElemList[numElemReg]) async(0)
#else
#pragma acc parallel loop present(vnewc[numElem], \
compression[numElemReg], \
delvc[numElemReg], \
compHalfStep[numElemReg], \
regElemList[numElemReg])
#endif
#else
#pragma omp for
#endif
for (i = 0; i < numElemReg ; ++i) {
int elem = regElemList[i];
Real_t vchalf ;
compression[i] = (Real_t)(1.) / vnewc[elem] - (Real_t)(1.);
vchalf = vnewc[elem] - delvc[i] * (Real_t)(.5);
compHalfStep[i] = (Real_t)(1.) / vchalf - (Real_t)(1.);
}
// Fused some loops here to reduce overhead of repeatedly calling small kernels
// (eosvmin/eosvmax clamping plus the work[] reset in one kernel)
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vnewc[numElem], \
compHalfStep[numElemReg], \
compression[numElemReg], \
regElemList[numElemReg], \
p_old[numElemReg], \
work[numElemReg]) async(0)
#else
#pragma acc parallel loop present(vnewc[numElem], \
compHalfStep[numElemReg], \
compression[numElemReg], \
regElemList[numElemReg], \
p_old[numElemReg], \
work[numElemReg])
#endif
#else
#pragma omp for
#endif
for(i = 0; i < numElemReg; ++i) {
int elem = regElemList[i];
if (eosvmin != 0.0 && vnewc[elem] <= eosvmin) { /* impossible due to calling func? */
compHalfStep[i] = compression[i] ;
}
if (eosvmax != 0.0 && vnewc[elem] >= eosvmax) { /* impossible due to calling func? */
p_old[i] = (Real_t)(0.) ;
compression[i] = (Real_t)(0.) ;
compHalfStep[i] = (Real_t)(0.) ;
}
work[i] = (Real_t)(0.) ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
//} // end omp parallel
CalcEnergyForElems(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq_old, ql_old, rho0, eosvmax,
numElemReg, regElemList);
} // end foreach repetition
/* scatter the converged region results back to the mesh arrays */
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(p_new[numElemReg], \
e_new[numElemReg], \
q_new[numElemReg], \
p[numElem], \
e[numElem], \
q[numElem]) async(0)
#else
#pragma acc parallel loop present(p_new[numElemReg], \
e_new[numElemReg], \
q_new[numElemReg], \
p[numElem], \
e[numElem], \
q[numElem])
#endif
#else
#pragma omp parallel for firstprivate(numElemReg)
#endif
for (i=0; i<numElemReg; ++i) {
int elem = regElemList[i];
p[elem] = p_new[i] ;
e[elem] = e_new[i] ;
q[elem] = q_new[i] ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
Real_t *ss = m_ss;
CalcSoundSpeedForElems(ss,
vnewc, rho0, e_new, p_new,
pbvc, bvc, ss4o3,
numElem, numElemReg, regElemList) ;
} // end acc data
}
/******************************************/
static inline
/* Clamp the updated relative volumes to [eosvmin, eosvmax], run a sanity
   check on the reference volumes, then evaluate the EOS region by region
   with an artificial cost multiplier (`rep`) for load-imbalance studies.
   FIX(review): the volume-check loop used
   `#pragma omp for nowait private(vc) reduction(min: vc)` — OpenMP forbids
   the same list item in both private and reduction clauses, and `nowait`
   let threads reach the `vc <= 0` test before the reduction completed.
   Both the private clause and the nowait are removed.
   NOTE(review): with vc declared inside the parallel region each thread
   still reduces into its own copy; the abort check below appears intended
   as a best-effort sanity test — confirm against the reference LULESH. */
void ApplyMaterialPropertiesForElems(Real_t vnew[])
{
Index_t numElem = m_numElem ;
Index_t i;
if (numElem != 0) {
/* Expose all of the variables needed for material evaluation */
Real_t eosvmin = m_eosvmin ;
Real_t eosvmax = m_eosvmax ;
#ifdef _OPENACC
#pragma acc data present(vnew[numElem])
#else
#pragma omp parallel firstprivate(numElem)
#endif
{
// Bound the updated relative volumes with eosvmin/max
if (eosvmin != (Real_t)(0.)) {
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop async(0)
#else
#pragma acc parallel loop
#endif
#else
#pragma omp for
#endif
for(i=0 ; i<numElem ; ++i) {
if (vnew[i] < eosvmin)
vnew[i] = eosvmin ;
}
}
if (eosvmax != (Real_t)(0.)) {
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop async(0)
#else
#pragma acc parallel loop
#endif
#else
#pragma omp for nowait
#endif
for(i=0 ; i<numElem ; ++i) {
if (vnew[i] > eosvmax)
vnew[i] = eosvmax ;
}
}
// This check may not make perfect sense in LULESH, but
// it's representative of something in the full code -
// just leave it in, please
Real_t *v = m_v;
Real_t vc = 1.;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop reduction(min: vc) \
present(v[numElem]) async(0)
#else
#pragma acc parallel loop reduction(min: vc) \
present(v[numElem])
#endif
#else
#pragma omp for reduction(min: vc)
#endif
for (i=0; i<numElem; ++i) {
vc = v[i];
if (eosvmin != (Real_t)(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != (Real_t)(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
if (vc <= 0.) {
/* a non-positive reference volume is unrecoverable */
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, VolumeError) ;
#else
exit(VolumeError);
#endif
}
} // end acc data
Int_t r;
for (r=0 ; r<m_numReg ; r++) {
int numElemReg = m_regElemSize[r];
int *regElemList = m_regElemlist[r];
Int_t rep;
//Determine load imbalance for this region
//round down the number with lowest cost
if(r < m_numReg/2)
rep = 1;
//you don't get an expensive region unless you at least have 5 regions
else if(r < (m_numReg - (m_numReg+15)/20))
rep = 1 + m_cost;
//very expensive regions
else
rep = 10 * (1+ m_cost);
//[DEBUG by Seyong Lee] If-statement is added to invoke EvalEOSForElems()
//only if numElemReg > 0.
if( numElemReg > 0 ) {
EvalEOSForElems(vnew, numElemReg, regElemList, rep);
}
}
}
}
/******************************************/
static inline
/* Commit the new relative volumes into v[]: any value within v_cut of 1.0
   is snapped to exactly 1.0 to suppress round-off drift. */
void UpdateVolumesForElems(Real_t *vnew, Real_t *v,
Real_t v_cut, Index_t length)
{
if (length == 0)
return ;
Index_t idx;
#ifdef _OPENACC
#ifdef USE_ASYNC
#pragma acc parallel loop present(vnew[length], \
v[length]) async(0)
#else
#pragma acc parallel loop present(vnew[length], \
v[length])
#endif
#else
#pragma omp parallel for firstprivate(length, v_cut)
#endif
for (idx = 0 ; idx < length ; ++idx) {
Real_t vol = vnew[idx] ;
/* snap near-unity volumes back to exactly one */
v[idx] = (fabs(vol - (Real_t)(1.0)) < v_cut) ? (Real_t)(1.0) : vol ;
}
#ifdef USE_ASYNC
#pragma acc wait(0)
#endif
return ;
}
/******************************************/
static inline
/* Element-centered half of a Lagrange step: kinematics, artificial
   viscosity (Q), EOS/material update, then commit the new volumes. */
void LagrangeElements(Index_t numElem)
{
Real_t *newVol = m_vnew ; /* scratch: new relative volumes for this step */
CalcLagrangeElements(newVol) ;
/* Calculate Q. (Monotonic q option requires communication) */
CalcQForElems(newVol) ;
ApplyMaterialPropertiesForElems(newVol) ;
UpdateVolumesForElems(newVol, m_v, m_v_cut, numElem) ;
}
/******************************************/
static inline
/* Tighten the Courant (sound-speed) timestep constraint over one region.
   Each OpenMP thread scans its share of elements and records its local
   minimum dtf and the element index that produced it; the serial loop at
   the end reduces the per-thread slots into slot 0. On the non-OpenMP
   path there is a single slot. *dtcourant is only updated if some element
   actually constrained the step (courant_elem != -1).
   NOTE(review): the per-thread buffers are allocated once and never freed
   (static, sized at first call) — presumably intentional for a process-
   lifetime scratch; thread count must not grow after the first call. */
void CalcCourantConstraintForElems(Int_t length,
Index_t *regElemlist, Real_t *ss,
Real_t *vdov, Real_t *arealg,
Real_t qqc,
Real_t* dtcourant, Index_t numElem)
{
#if !defined(_OPENACC) && defined(_OPENMP)
Index_t threads = omp_get_max_threads();
static Index_t *courant_elem_per_thread;
static Real_t *dtcourant_per_thread;
static bool first = true;
if (first) {
/* one slot per thread, allocated once for the life of the process */
courant_elem_per_thread = (Index_t*) calloc(threads, sizeof(Index_t));
dtcourant_per_thread = (Real_t*) calloc(threads, sizeof(Real_t));
first = false;
}
#else
Index_t threads = 1;
Index_t courant_elem_per_thread[1];
Real_t dtcourant_per_thread[1];
#endif
Index_t i;
#if !defined(_OPENACC) && defined(_OPENMP)
#pragma omp parallel firstprivate(length, qqc)
#endif
{
Real_t qqc2 = (Real_t)(64.0) * qqc * qqc ;
Real_t dtcourant_tmp = *dtcourant;
Index_t courant_elem = -1 ; /* -1 means "no element constrained dt" */
#if !defined(_OPENACC) && defined(_OPENMP)
Index_t thread_num = omp_get_thread_num();
#else
Index_t thread_num = 0;
#endif
#if !defined(_OPENACC) && defined(_OPENMP)
#pragma omp for
#endif
for (i = 0 ; i < length ; ++i) {
Index_t indx = regElemlist[i] ;
Real_t dtf = ss[indx] * ss[indx] ;
if ( vdov[indx] < (Real_t)(0.) ) {
/* element is compressing: add the artificial-viscosity term */
dtf = dtf
+ qqc2 * arealg[indx] * arealg[indx]
* vdov[indx] * vdov[indx] ;
}
dtf = SQRT(dtf) ;
dtf = arealg[indx] / dtf ;
if (vdov[indx] != (Real_t)(0.)) {
if ( dtf < dtcourant_tmp ) {
dtcourant_tmp = dtf ;
courant_elem = indx ;
}
}
}
dtcourant_per_thread[thread_num] = dtcourant_tmp ;
courant_elem_per_thread[thread_num] = courant_elem ;
}
/* serial reduction of the per-thread minima into slot 0 */
for (i = 1; i < threads; ++i) {
if (dtcourant_per_thread[i] < dtcourant_per_thread[0] ) {
dtcourant_per_thread[0] = dtcourant_per_thread[i];
courant_elem_per_thread[0] = courant_elem_per_thread[i];
}
}
if (courant_elem_per_thread[0] != -1) {
*dtcourant = dtcourant_per_thread[0] ;
}
return ;
}
/******************************************/
static inline
/* Tighten the hydro (volume-change-rate) timestep constraint over one
   region: dt <= dvovmax / |vdov| for every element with nonzero vdov.
   Mirrors the per-thread slot + serial-reduction structure of
   CalcCourantConstraintForElems above. */
void CalcHydroConstraintForElems(Int_t length,
Index_t *regElemlist, Real_t *vdov,
Real_t dvovmax,
Real_t* dthydro, Index_t numElem)
{
/* ACC: vdov was updated in CalcCourantConstraintForElems so we don't need to
update it again. */
#if !defined(_OPENACC) && defined(_OPENMP)
Index_t threads = omp_get_max_threads();
static Index_t *hydro_elem_per_thread;
static Real_t *dthydro_per_thread;
static bool first = true;
if (first) {
/* one slot per thread, allocated once for the life of the process */
hydro_elem_per_thread = (Index_t*) calloc(threads, sizeof(Index_t));
dthydro_per_thread = (Real_t*) calloc(threads, sizeof(Real_t));
first = false;
}
#else
Index_t threads = 1;
Index_t hydro_elem_per_thread[1];
Real_t dthydro_per_thread[1];
#endif
Index_t i;
#if !defined(_OPENACC) && defined(_OPENMP)
#pragma omp parallel firstprivate(length, dvovmax)
#endif
{
Real_t dthydro_tmp = *dthydro ;
Index_t hydro_elem = -1 ; /* -1 means "no element constrained dt" */
#if !defined(_OPENACC) && defined(_OPENMP)
Index_t thread_num = omp_get_thread_num();
#else
Index_t thread_num = 0;
#endif
#if !defined(_OPENACC) && defined(_OPENMP)
#pragma omp for
#endif
for (i = 0 ; i < length ; ++i) {
Index_t indx = regElemlist[i] ;
if (vdov[indx] != (Real_t)(0.)) {
/* 1e-20 guards the division against a vanishing |vdov| */
Real_t dtdvov = dvovmax / (FABS(vdov[indx])+(Real_t)(1.e-20)) ;
if ( dthydro_tmp > dtdvov ) {
dthydro_tmp = dtdvov ;
hydro_elem = indx ;
}
}
}
dthydro_per_thread[thread_num] = dthydro_tmp ;
hydro_elem_per_thread[thread_num] = hydro_elem ;
}
/* serial reduction of the per-thread minima into slot 0 */
for (i = 1; i < threads; ++i) {
if(dthydro_per_thread[i] < dthydro_per_thread[0]) {
dthydro_per_thread[0] = dthydro_per_thread[i];
hydro_elem_per_thread[0] = hydro_elem_per_thread[i];
}
}
if (hydro_elem_per_thread[0] != -1) {
*dthydro = dthydro_per_thread[0] ;
}
return ;
}
/******************************************/
static inline
/* Compute the global timestep constraints by scanning every region:
   the Courant (sound-speed) limit and the hydro (volume-change) limit.
   Both module-level results start effectively unbounded and can only be
   tightened by the per-region helpers. */
void CalcTimeConstraintsForElems() {
Index_t region;
// Initialize conditions to a very large value
m_dtcourant = 1.0e+20;
m_dthydro = 1.0e+20;
/* wait for async mem updates to finish */
#ifndef USE_UNIFIEDMEM
#pragma acc wait
#endif
for (region = 0 ; region < m_numReg ; ++region) {
/* evaluate time constraint */
CalcCourantConstraintForElems(m_regElemSize[region],
m_regElemlist[region], m_ss,
m_vdov, m_arealg,
m_qqc,
&m_dtcourant, m_numElem) ;
/* check hydro constraint */
CalcHydroConstraintForElems(m_regElemSize[region],
m_regElemlist[region], m_vdov,
m_dvovmax,
&m_dthydro, m_numElem);
}
}
/******************************************/
static inline
/* One leapfrog step: nodal update (forces/accel/velocity/position),
   element update (gradients, Q, EOS), then the timestep constraints.
   When MPI + SEDOV_SYNC_POS_VEL_LATE are enabled, position/velocity halo
   exchange is overlapped with the element work via async host updates. */
void LagrangeLeapFrog()
{
Index_t numElem = m_numElem;
#ifdef SEDOV_SYNC_POS_VEL_LATE
Real_t *fieldData[6] ;
/* wait for async device update to complete */
#ifndef USE_UNIFIEDMEM
#pragma acc wait
#endif
#endif
//[DEBUG by Seyong Lee] Below definitions are moved out of
//the above #ifdef macro region.
volatile Index_t numNode = m_numNode;
Real_t *x = m_x;
Real_t *y = m_y;
Real_t *z = m_z;
Real_t *xd = m_xd;
Real_t *yd = m_yd;
Real_t *zd = m_zd;
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
LagrangeNodal();
#pragma acc data present(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode])
{
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
/* wait for async device update to complete (in LagrangeNodal) */
#ifndef USE_UNIFIEDMEM
#pragma acc wait
#endif
#endif
#ifdef SEDOV_SYNC_POS_VEL_LATE
/* asynchronously update on host before MPI comm */
volatile int up = 1;
#ifdef USE_UNIFIEDMEM
#pragma acc update host(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode])
#else
#pragma acc update host(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode]) \
async(up)
#endif
#endif
#endif
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
LagrangeElements(numElem);
// update values for CalcTimeConstraintsForElems as early as possible
#ifdef _OPENACC
Real_t *ss = m_ss;
Real_t *vdov = m_vdov;
Real_t *arealg = m_arealg;
#pragma acc data present(ss[numElem], \
vdov[numElem], \
arealg[numElem])
{
#ifdef USE_UNIFIEDMEM
#pragma acc update host(ss[numElem], \
vdov[numElem], \
arealg[numElem])
#else
/* async host copy overlaps with the MPI exchange below */
#pragma acc update host(ss[numElem], \
vdov[numElem], \
arealg[numElem]) \
async
#endif
}
#endif
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_LATE
#ifndef USE_UNIFIEDMEM
#pragma acc wait(up)
#endif
/* exchange position/velocity halos with neighbor ranks */
CommRecv(MSG_SYNC_POS_VEL, 6,
m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
false, false) ;
fieldData[0] = x;
fieldData[1] = y;
fieldData[2] = z;
fieldData[3] = xd;
fieldData[4] = yd;
fieldData[5] = zd;
CommSend(MSG_SYNC_POS_VEL, 6, fieldData,
m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
false, false) ;
#endif
#endif
CalcTimeConstraintsForElems();
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_LATE
CommSyncPosVel() ;
/* push the received halo values back to the device */
#ifdef USE_UNIFIEDMEM
#pragma acc update device(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode])
#else
#pragma acc update device(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode]) \
async
#endif
#endif
#endif
} // end acc data
}
/******************************************/
/* Driver: initialize MPI and the mesh, stage all simulation arrays on the
   accelerator, run the timestep loop, then report/verify results. */
int main(int argc, char *argv[])
{
int numRanks ;
int myRank ;
struct cmdLineOpts opts;
#if USE_MPI
Real_t *nodalMass;
MPI_Init(&argc, &argv) ;
MPI_Comm_size(MPI_COMM_WORLD, &numRanks) ;
MPI_Comm_rank(MPI_COMM_WORLD, &myRank) ;
printf("[%s:%d] [%d/%d]\n", __FILE__, __LINE__, myRank, numRanks);
#else
numRanks = 1;
myRank = 0;
#endif
#if LULESH_DUMP_OUTPUT
FILE *fp;
int i;
#endif
/* Set defaults that can be overridden by command line opts */
opts.its = 9999999;
opts.nx = 30;
opts.numReg = 11;
opts.numFiles = (int)(numRanks+10)/9;
opts.showProg = 0;
opts.quiet = 0;
opts.viz = 0;
opts.balance = 1;
opts.cost = 1;
ParseCommandLineOptions(argc, argv, myRank, &opts);
if ((myRank == 0) && (opts.quiet == 0)) {
printf("Running problem size %d^3 per domain until completion\n", opts.nx);
printf("Num processors: %d\n", numRanks);
#if !defined(_OPENACC) && defined(_OPENMP)
printf("Num threads: %d\n", omp_get_max_threads());
#endif
printf("Total number of elements: %d\n\n", numRanks*opts.nx*opts.nx*opts.nx);
printf("To run other sizes, use -s <integer>.\n");
printf("To run a fixed number of iterations, use -i <integer>.\n");
printf("To run a more or less balanced region set, use -b <integer>.\n");
printf("To change the relative costs of regions, use -c <integer>.\n");
printf("To print out progress, use -p\n");
printf("To write an output file for VisIt, use -v\n");
printf("See help (-h) for more options\n\n");
}
// Set up the mesh and decompose. Assumes regular cubes for now
Int_t col, row, plane, side;
InitMeshDecomp(numRanks, myRank, &col, &row, &plane, &side);
// Build the main data structure and initialize it
Domain(numRanks, col, row, plane, opts.nx,
side, opts.numReg, opts.balance, opts.cost) ;
#if USE_MPI
nodalMass = m_nodalMass;
// Initial domain boundary communication
CommRecv(MSG_COMM_SBN, 1,
m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
true, false) ;
CommSend(MSG_COMM_SBN, 1, &nodalMass,
m_sizeX + 1, m_sizeY + 1, m_sizeZ + 1,
true, false) ;
CommSBN(1, &nodalMass) ;
// End initialization
MPI_Barrier(MPI_COMM_WORLD);
#endif
// BEGIN timestep to solution */
// (non-MPI path stores clock() ticks in start; converted to seconds below)
Real_t start;
#if USE_MPI
start = MPI_Wtime();
#else
start = clock();
#endif
/* tmp region-based arrays */
int maxRegSize = 0;
Int_t r;
for (r=0 ; r < m_numReg ; r++) {
maxRegSize = MAX(maxRegSize, m_regElemSize[r]);
}
AllocateRegionTmps(maxRegSize);
#ifdef _OPENACC
/* local aliases for every device-resident array, so the data clauses
   below can name them directly */
Index_t numElem = m_numElem;
Index_t numElem8 = numElem * 8;
Index_t numNode = m_numNode;
Index_t size = m_sizeX;
Index_t numNodeBC = (size+1)*(size+1) ;
Index_t allElem = numElem + /* local elem */
2*m_sizeX*m_sizeY + /* plane ghosts */
2*m_sizeX*m_sizeZ + /* row ghosts */
2*m_sizeY*m_sizeZ ; /* col ghosts */
Real_t *fx = m_fx;
Real_t *fy = m_fy;
Real_t *fz = m_fz;
// load tmp arrays
Real_t *fx_elem = m_fx_elem;
Real_t *fy_elem = m_fy_elem;
Real_t *fz_elem = m_fz_elem;
Real_t *dvdx = m_dvdx;
Real_t *dvdy = m_dvdy;
Real_t *dvdz = m_dvdz;
Real_t *x8n = m_x8n;
Real_t *y8n = m_y8n;
Real_t *z8n = m_z8n;
Real_t *sigxx = m_sigxx;
Real_t *sigyy = m_sigyy;
Real_t *sigzz = m_sigzz;
Real_t *determ = m_determ;
Real_t *dxx = m_dxx;
Real_t *dyy = m_dyy;
Real_t *dzz = m_dzz;
Real_t *vnew = m_vnew;
Real_t *delv_xi = m_delv_xi;
Real_t *delv_eta = m_delv_eta;
Real_t *delv_zeta = m_delv_zeta;
Real_t *delx_xi = m_delx_xi;
Real_t *delx_eta = m_delx_eta;
Real_t *delx_zeta = m_delx_zeta;
Real_t *e_old = m_e_old ;
Real_t *delvc = m_delvc ;
Real_t *p_old = m_p_old ;
Real_t *q_old = m_q_old ;
Real_t *compression = m_compression ;
Real_t *compHalfStep = m_compHalfStep ;
Real_t *qq_old = m_qq_old ;
Real_t *ql_old = m_ql_old ;
Real_t *work = m_work ;
Real_t *p_new = m_p_new ;
Real_t *e_new = m_e_new ;
Real_t *q_new = m_q_new ;
Real_t *bvc = m_bvc ;
Real_t *pbvc = m_pbvc ;
Real_t *x = m_x;
Real_t *y = m_y;
Real_t *z = m_z;
Real_t *xd = m_xd;
Real_t *yd = m_yd;
Real_t *zd = m_zd;
Real_t *xdd = m_xdd;
Real_t *ydd = m_ydd;
Real_t *zdd = m_zdd;
Real_t *v = m_v;
Real_t *volo = m_volo;
Real_t *delv = m_delv;
Real_t *vdov = m_vdov;
Real_t *arealg = m_arealg;
#if !USE_MPI
/* nodalMass already defined if USE_MPI */
Real_t *nodalMass = m_nodalMass;
#endif
Real_t *elemMass = m_elemMass;
Real_t *ss = m_ss;
Index_t *lxim = m_lxim;
Index_t *lxip = m_lxip;
Index_t *letam = m_letam;
Index_t *letap = m_letap;
Index_t *lzetam = m_lzetam;
Index_t *lzetap = m_lzetap;
Real_t *p = m_p;
Real_t *e = m_e;
Real_t *q = m_q;
Real_t *qq = m_qq;
Real_t *ql = m_ql;
Index_t *symmX = m_symmX;
Index_t *symmY = m_symmY;
Index_t *symmZ = m_symmZ;
Index_t *nodelist = m_nodelist;
Index_t *nodeElemCount = m_nodeElemCount;
Index_t *nodeElemStart = m_nodeElemStart;
Index_t *nodeElemCornerList = m_nodeElemCornerList;
Index_t *elemBC = m_elemBC;
Index_t nCorner = nodeElemStart[numNode-1] + nodeElemCount[numNode-1];
/* Since these are only found in pragmas they'll be optimized out -- this
forces them to remain. If we instead switch all of these pointers to
volatile some crashes continue happening, so this seems to work best
for now. */
volatile Index_t dummyI = nodelist[numElem8-1] + nodeElemCount[numNode-1] +
nodeElemStart[numNode-1] + nodeElemCornerList[nCorner-1] +
lxim[numElem-1] + lxip[numElem-1] + letam[numElem-1] +
letap[numElem-1] + lzetam[numElem-1] + lzetap[numElem-1] +
elemBC[numElem-1];
if(!m_symmXempty)
dummyI += symmX[numNodeBC-1];
if(!m_symmYempty)
dummyI += symmY[numNodeBC-1];
if(!m_symmZempty)
dummyI += symmZ[numNodeBC-1];
volatile Real_t dummyR = x[numNode-1] + y[numNode-1] + z[numNode-1] +
xd[numNode-1] + yd[numNode-1] + zd[numNode-1] +
xdd[numNode-1] + ydd[numNode-1] + zdd[numNode-1] +
fx[numNode-1] + fy[numNode-1] + fz[numNode-1] +
fx_elem[numElem8-1] + fy_elem[numElem8-1] + fz_elem[numElem8-1] +
dvdx[numElem8-1] + dvdy[numElem8-1] + dvdz[numElem8-1] +
x8n[numElem8-1] + y8n[numElem8-1] + z8n[numElem8-1] +
sigxx[numElem-1] + sigyy[numElem-1] + sigzz[numElem-1] +
dxx[numElem-1] + dyy[numElem-1] + dzz[numElem-1] +
determ[numElem-1] + vnew[numElem-1] +
delv_xi[allElem-1] + delv_xi[allElem-1] + delv_eta[allElem-1] +
delv_zeta[allElem-1] + delx_xi[allElem-1] + delx_eta[allElem-1] +
delx_zeta[allElem-1] +
v[numElem-1] + volo[numElem-1] + delv[numElem-1] +
arealg[numElem-1] + vdov[numElem-1] + ss[numElem-1] +
p[numElem-1] + e[numElem-1] + q[numElem-1] +
qq[numElem-1] + ql[numElem-1] +
elemMass[numElem-1] + nodalMass[numNode-1] +
e_old[maxRegSize-1] + delvc[maxRegSize-1] +
p_old[maxRegSize-1] + q_old[maxRegSize-1] +
compression[maxRegSize-1] + compHalfStep[maxRegSize-1] +
qq_old[maxRegSize-1] + ql_old[maxRegSize-1] +
work[maxRegSize-1] + p_new[maxRegSize-1] +
e_new[maxRegSize-1] + q_new[maxRegSize-1] +
bvc[maxRegSize-1] + pbvc[maxRegSize-1];
if(myRank == 0) {
printf("Copying data to device...");
fflush(stdout);
}
/* one device-data region spans the whole timestep loop:
   create = device-only scratch, copy = in/out state, copyin = inputs */
#pragma acc data create(fx[numNode], \
fy[numNode], \
fz[numNode], \
fx_elem[numElem8], \
fy_elem[numElem8], \
fz_elem[numElem8], \
dvdx[numElem8], \
dvdy[numElem8], \
dvdz[numElem8], \
x8n[numElem8], \
y8n[numElem8], \
z8n[numElem8], \
sigxx[numElem], \
sigyy[numElem], \
sigzz[numElem], \
determ[numElem], \
dxx[numElem], \
dyy[numElem], \
dzz[numElem], \
vnew[numElem], \
delx_xi[allElem], \
delx_eta[allElem], \
delx_zeta[allElem], \
delv_xi[allElem], \
delv_eta[allElem], \
delv_zeta[allElem], \
e_old[maxRegSize], \
delvc[maxRegSize], \
p_old[maxRegSize], \
q_old[maxRegSize], \
compression[maxRegSize], \
compHalfStep[maxRegSize], \
qq_old[maxRegSize], \
ql_old[maxRegSize], \
work[maxRegSize], \
p_new[maxRegSize], \
e_new[maxRegSize], \
q_new[maxRegSize], \
bvc[maxRegSize], \
pbvc[maxRegSize]) \
copy(x[numNode], \
y[numNode], \
z[numNode], \
xd[numNode], \
yd[numNode], \
zd[numNode], \
p[numElem], \
e[numElem]) \
copyin(symmX[numNodeBC], \
symmY[numNodeBC], \
symmZ[numNodeBC], \
xdd[numNode], \
ydd[numNode], \
zdd[numNode], \
v[numElem], \
volo[numElem], \
delv[numElem], \
arealg[numElem], \
vdov[numElem], \
ss[numElem], \
q[numElem], \
qq[numElem], \
ql[numElem], \
nodalMass[numNode], \
elemMass[numElem], \
lxim[numElem], \
lxip[numElem], \
letam[numElem], \
letap[numElem], \
lzetam[numElem], \
lzetap[numElem], \
nodelist[numElem8], \
nodeElemCount[numNode], \
nodeElemStart[numNode], \
nodeElemCornerList[nCorner], \
elemBC[numElem])
#endif
{
#ifdef _OPENACC
if(myRank == 0) {
printf("done.\n");
fflush(stdout);
}
#endif
/* main timestep loop: advance until stop time or iteration cap */
while((m_time < m_stoptime) && (m_cycle < opts.its)) {
TimeIncrement() ;
LagrangeLeapFrog() ;
if ((opts.showProg != 0) && (opts.quiet == 0) && (myRank == 0)) {
printf("cycle = %d, time = %e, dt=%e\n",
m_cycle, (double)(m_time), (double)(m_deltatime) ) ;
}
}
} // end acc data
// Use reduced max elapsed time
Real_t elapsed_time;
#if USE_MPI
elapsed_time = MPI_Wtime() - start;
#else
elapsed_time = (clock() - start) / CLOCKS_PER_SEC;
#endif
double elapsed_timeG;
#if USE_MPI
MPI_Reduce(&elapsed_time, &elapsed_timeG, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
#else
elapsed_timeG = elapsed_time;
#endif
#if LULESH_DUMP_OUTPUT
fp = fopen("lulesh.dump", "w");
for (i = 0; i < numNode; i++) fprintf(fp, "%.6f\n", x[i]);
for (i = 0; i < numNode; i++) fprintf(fp, "%.6f\n", y[i]);
for (i = 0; i < numNode; i++) fprintf(fp, "%.6f\n", z[i]);
fclose(fp);
#endif
if ((myRank == 0) && (opts.quiet == 0)) {
VerifyAndWriteFinalOutput(elapsed_timeG, opts.nx, numRanks);
}
#if USE_MPI
MPI_Finalize() ;
#endif
// OpenACC - release device ptrs
ReleaseDeviceMem();
return 0 ;
}
|
trmm_x_dia_u_lo_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    /* Multi-vector product with the unit lower-triangular part of a
       DIA-format matrix: for every column of x/y, first form
       y = beta*y + alpha*x (the implicit unit diagonal), then add the
       contributions of every strictly-lower diagonal (distance < 0). */
    ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT col = 0; col < columns; col++)
    {
        for (ALPHA_INT row = 0; row < mat->rows; row++)
        {
            /* unit-diagonal term: y = beta*y + alpha*x */
            alpha_mul(y[index2(col,row,ldy)], y[index2(col,row,ldy)], beta);
            alpha_madde(y[index2(col,row,ldy)], x[index2(col,row,ldx)], alpha);
        }
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT col = 0; col < columns; ++col)
    {
        ALPHA_Number *ycol = &y[index2(col,0,ldy)];
        const ALPHA_Number *xcol = &x[index2(col,0,ldx)];
        for (ALPHA_INT di = 0; di < mat->ndiag; ++di)
        {
            const ALPHA_INT d = mat->distance[di];
            if (d >= 0)
                continue; /* only diagonals strictly below the main one */
            const ALPHA_INT row0 = alpha_max(0,-d);
            const ALPHA_INT col0 = alpha_max(0,d);
            const ALPHA_INT len = alpha_min(mat->rows - row0, mat->cols - col0);
            for (ALPHA_INT k = 0; k < len; ++k)
            {
                ALPHA_Number scaled;
                alpha_mul(scaled, mat->values[index2(di, row0 + k, mat->lval)], alpha);
                alpha_madde(ycol[row0 + k], scaled, xcol[col0 + k]);
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GB_binop__first_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// Macro configuration consumed by the GraphBLAS kernel templates included
// below: they specialize every generated kernel to the FIRST operator
// (z = x, i.e. cij = aij) over single-precision complex (GxB_FC32_t).
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__first_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__first_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__first_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_fc32)
// A*D function (colscale): GB (_AxD__first_fc32)
// D*A function (rowscale): GB (_DxB__first_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__first_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__first_fc32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_fc32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = aij
// type of matrix A entries
#define GB_ATYPE \
GxB_FC32_t
// type of matrix B entries
#define GB_BTYPE \
GxB_FC32_t
// type of matrix C entries
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
// empty: FIRST never reads its second operand, so B values are not fetched
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: FIRST(x,y) = x; y is intentionally ignored
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (set via GB_control.h to reduce compiled code size)
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_FC32 || GxB_NO_FIRST_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled (#if 0): FIRST is not one of the ops listed above, so the code
// generator emits no dense ewise3-accum kernel for this operator/type pair.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// All work is done by the included template, specialized by the macros above.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE,
// signalling the caller to fall back to the generic (non-specialized) path.
GrB_Info GB (_Cdense_ewise3_noaccum__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The inner template is disabled (#if 0) for FIRST: C += B with the FIRST
// accumulator is a no-op on existing entries, so the stub just returns.
GrB_Info GB (_Cdense_accumB__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// As with _Cdense_accumB, the template is compiled out (#if 0) for FIRST:
// accumulating a scalar with FIRST leaves C unchanged.
GrB_Info GB (_Cdense_accumb__first_fc32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// The template writes directly into C->x; with FIRST, cij = aij (D ignored).
GrB_Info GB (_AxD__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Mirror of _AxD for the row-scaling case; with FIRST, cij takes the D value.
GrB_Info GB (_DxB__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// The GB_WERK slicing workspaces are declared here because the included
// template allocates into them and GB_FREE_WORK releases them on exit.
GrB_Info GB (_AaddB__first_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Method 08: general eWiseMult producing a sparse/hypersparse C; the
// template dispatches on mask presence/complement and sparsity formats.
GrB_Info GB (_AemultB_08__first_fc32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 0 for FIRST (handled by rewriting FIRST as SECOND with
// operands swapped at a higher level), so only the non-flipped branch below
// is compiled; the flipxy argument is effectively unused in that branch.
GrB_Info GB (_AemultB_02__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Method 04: iterate over the sparse mask M (sliced via M_ek_slicing) and
// probe A and B, which are bitmap/full and thus O(1)-addressable.
GrB_Info GB (_AemultB_04__first_fc32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap-output variant of eWiseMult; all mask handling lives in the template.
GrB_Info GB (_AemultB_bitmap__first_fc32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Disabled (#if 0): FIRST with a bound first scalar reduces to a constant
// fill (Cx [p] = x), which GraphBLAS handles through a cheaper generic path.
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Disabled (#if 0): FIRST with a bound second scalar is just an identity
// copy of Ax, so no specialized kernel is generated.
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Disabled (#if 0): transpose-and-fill with a constant is handled generically.
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to the A-type for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// Disabled (#if 0): with FIRST this is a plain transpose of A, already
// covered by the unary-op transpose kernels.
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
rawSHA256_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2010 by Solar Designer
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*
* Understands hex hashes as well as Cisco "type 4" base64.
*
* Rewritten Spring 2013, JimF. SSE code added and released with the following terms:
* No copyright is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the public
* domain is deemed null and void, then the software is Copyright (c) 2011 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA256;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA256);
#else
#include <stdint.h>
#include "arch.h"
#include "sha2.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "formats.h"
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA256
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-SHA256"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
/* Note: Cisco hashes are truncated at length 25. We currently ignore this. */
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 55
#else
#define PLAINTEXT_LENGTH 125
#endif
#define _RAWSHA256_H
#include "rawSHA256_common.h"
#undef _RAWSHA256_H
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef SIMD_COEF_32
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
static uint32_t (*saved_key);
static uint32_t (*crypt_out);
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)
[(DIGEST_SIZE + sizeof(uint32_t) - 1) / sizeof(uint32_t)];
#endif
// Allocate per-run key/output buffers, scaling key counts for OpenMP.
// Non-SIMD: plain per-key buffers. SIMD: one interleaved, SIMD-aligned
// region sized SHA_BUF_SIZ words per key (input) and 8 words per key (state).
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
// min is scaled by thread count only; max also gets the OMP_SCALE factor
// so each thread processes many keys per crypt_all() call
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_32
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
#else
saved_key = mem_calloc_align(self->params.max_keys_per_crypt * SHA_BUF_SIZ,
sizeof(*saved_key),
MEM_ALIGN_SIMD);
crypt_out = mem_calloc_align(self->params.max_keys_per_crypt * 8,
sizeof(*crypt_out),
MEM_ALIGN_SIMD);
#endif
}
// Release the buffers allocated in init(); saved_len only exists non-SIMD.
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
#ifndef SIMD_COEF_32
MEM_FREE(saved_len);
#endif
}
// Decode the hex digest portion of a canonical ciphertext into raw binary.
// Returns a pointer to static storage (valid until the next call). In SIMD
// builds the bytes are endian-swapped and partially "reversed" so they can
// be compared directly against the raw SIMD crypt_out state.
static void *get_binary(char *ciphertext)
{
static unsigned int *outw;
unsigned char *out;
char *p;
int i;
if (!outw)
outw = mem_calloc_tiny(DIGEST_SIZE, MEM_ALIGN_WORD);
out = (unsigned char*)outw;
// ciphertext is canonical here, so it always carries the hex tag prefix
p = ciphertext + HEX_TAG_LEN;
for (i = 0; i < DIGEST_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
#ifdef SIMD_COEF_32
alter_endianity (out, DIGEST_SIZE);
#ifdef REVERSE_STEPS
// undo the final SHA-256 rounds so comparisons can skip them in crypt_all
sha256_reverse(outw);
#endif
#endif
return out;
}
// Fast partial-hash accessors used for cracker hash-table bucketing.
#ifdef SIMD_COEF_32
// Locate word 0 of key `index` inside the interleaved SIMD output layout.
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32)
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
// Non-SIMD layout: crypt_out[index] is a contiguous digest; use word 0.
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
// Same partial-hash functions, but over a loaded binary (from get_binary),
// so loaded hashes land in the same buckets as computed ones.
static int binary_hash_0(void *binary) { return ((uint32_t*)binary)[0] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((uint32_t*)binary)[0] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((uint32_t*)binary)[0] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((uint32_t*)binary)[0] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((uint32_t*)binary)[0] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((uint32_t*)binary)[0] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((uint32_t*)binary)[0] & PH_MASK_6; }
#ifdef SIMD_COEF_32
// Store a candidate key directly into the interleaved SIMD input buffer,
// big-endian-swapped word by word, appending the 0x80 SHA-256 padding byte
// and the bit length in the final word. Reads the key 4 bytes at a time,
// hence the alignment shim for architectures that fault on unaligned loads.
static void set_key(char *key, int index) {
#if ARCH_ALLOWS_UNALIGNED
const uint32_t *wkey = (uint32_t*)key;
#else
char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
const uint32_t *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ?
key : strcpy(buf_aligned, key));
#endif
uint32_t *keybuffer = &((uint32_t *)saved_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32];
uint32_t *keybuf_word = keybuffer;
unsigned int len;
uint32_t temp;
len = 0;
// scan each 32-bit word for the terminating NUL; when found, merge the
// 0x80 padding byte into the same word and stop
while((unsigned char)(temp = *wkey++)) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = JOHNSWAP(temp | (0x80U << 24));
len+=3;
goto key_cleaning;
}
*keybuf_word = JOHNSWAP(temp);
len += 4;
keybuf_word += SIMD_COEF_32;
}
// key length was a multiple of 4: padding byte goes in its own word
*keybuf_word = 0x80000000;
key_cleaning:
// zero out stale words left over from a previous, longer key
keybuf_word += SIMD_COEF_32;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_32;
}
// SHA-256 message length in bits, stored in the block's last word
keybuffer[15*SIMD_COEF_32] = len << 3;
}
#else
// Non-SIMD: copy the key (truncated to PLAINTEXT_LENGTH) and cache its length.
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_len[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
}
#endif
#ifdef SIMD_COEF_32
// Reconstruct the plaintext for key `index` from the interleaved SIMD
// buffer; length is recovered from the stored bit count (>> 3).
// Returns a pointer to static storage.
static char *get_key(int index) {
unsigned int i,s;
static char out[PLAINTEXT_LENGTH+1];
unsigned char *wucp = (unsigned char*)saved_key;
s = ((uint32_t *)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] >> 3;
for (i=0;i<s;i++)
out[i] = wucp[ GETPOS(i, index) ];
out[i] = 0;
return (char*) out;
}
#else
// Non-SIMD: NUL-terminate in place using the cached length and return it.
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
#endif
#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif
// Hash all queued keys. With OpenMP the index loop strides by
// MAX_KEYS_PER_CRYPT; without it the braced body runs once for index 0,
// which covers all keys since max_keys_per_crypt is not OMP-scaled then.
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
#ifdef SIMD_COEF_32
SIMDSHA256body(&saved_key[(unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32],
&crypt_out[(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32],
NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);
#else
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index], saved_len[index]);
SHA256_Final((unsigned char *)crypt_out[index], &ctx);
#endif
}
return count;
}
// Quick scan: does ANY computed hash share its first 32-bit word with the
// loaded binary? (count is positive here, so the signed/unsigned loop
// comparison is safe.)
static int cmp_all(void *binary, int count)
{
unsigned int index;
for (index = 0; index < count; index++)
#ifdef SIMD_COEF_32
if (((uint32_t*) binary)[0] == crypt_out[HASH_IDX])
#else
if ( ((uint32_t*)binary)[0] == crypt_out[index][0] )
#endif
return 1;
return 0;
}
// Compare the first 32-bit word of one computed hash against the loaded
// binary; a full check is deferred to cmp_exact().
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
return ((uint32_t*)binary)[0] == crypt_out[HASH_IDX];
#else
return *(uint32_t*)binary == crypt_out[index][0];
#endif
}
// Full verification: recompute SHA-256 of the candidate key with the scalar
// library and compare the whole digest. In SIMD builds the fresh digest is
// endian-swapped/reversed the same way get_binary() transforms the target,
// so the memcmp is apples-to-apples.
static int cmp_exact(char *source, int index)
{
uint32_t *binary = get_binary(source);
char *key = get_key(index);
SHA256_CTX ctx;
// local buffer deliberately shadows the global crypt_out
uint32_t crypt_out[DIGEST_SIZE / sizeof(uint32_t)];
SHA256_Init(&ctx);
SHA256_Update(&ctx, key, strlen(key));
SHA256_Final((unsigned char*)crypt_out, &ctx);
#ifdef SIMD_COEF_32
alter_endianity(crypt_out, DIGEST_SIZE);
#ifdef REVERSE_STEPS
sha256_reverse(crypt_out);
#endif
#endif
return !memcmp(binary, crypt_out, DIGEST_SIZE);
}
// Format descriptor registered with John the Ripper: static parameters
// first, then the method table wiring up the functions defined above.
struct fmt_main fmt_rawSHA256 = {
{
FORMAT_LABEL,
FORMAT_NAME,
"SHA256 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
FMT_SPLIT_UNIFIES_CASE,
{ NULL },
// accepted ciphertext prefixes: plain hex and Cisco "type 4"
{
HEX_TAG,
CISCO_TAG
},
sha256_common_tests
}, {
init,
done,
fmt_default_reset,
sha256_common_prepare,
sha256_common_valid,
sha256_common_split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
binary_hash_0,
binary_hash_1,
binary_hash_2,
binary_hash_3,
binary_hash_4,
binary_hash_5,
binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
rng.c | /**
* @file rng.c
* @author Michael Trotter & Matt Goodrum
* @brief Uniform and Normal RNG Implemented in OpenMP
*/
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <omp.h>
/* PI is evaluated via acos(-1) at each use site (not a compile-time constant). */
#define PI acos(-1)
/**
@var M modulus for the Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A multiplier value for LCG
*/
int A = 1103515245;
/**
@var C increment value for LCG
*/
int C = 12345;
/**
 * Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
 * @see http://en.wikipedia.org/wiki/Linear_congruential_generator
 * @note This function is thread-safe as long as each thread uses a distinct index
 * @param seed The seed array (state is advanced in place)
 * @param index The specific index of the seed to be advanced
 * @return a uniformly distributed number in [0, 1)
 */
double randu(int * seed, int index)
{
	/*
	 * A*seed can exceed INT_MAX; doing the multiply-add in unsigned
	 * arithmetic gives well-defined modulo-2^32 wraparound instead of
	 * signed-overflow undefined behavior.  Converting the wrapped value
	 * back to int reproduces the two's-complement result the original
	 * (UB-laden) expression produced on mainstream platforms.
	 */
	int num = (int) ((unsigned int) A * (unsigned int) seed[index] + (unsigned int) C);
	seed[index] = num % M;
	/* num % M may be negative in C; fabs folds the result into [0, 1) */
	return fabs(seed[index] / ((double) M));
}
/**
 * Generates a normally distributed random number via the Box-Muller transform.
 * @note Thread-safe as long as each thread uses a distinct seed index
 * @param seed The seed array (advanced twice by this call)
 * @param index The specific index of the seed to be advanced
 * @return a standard-normal deviate
 * @see http://en.wikipedia.org/wiki/Normal_distribution
 */
double randn(int * seed, int index){
	/* Draw two independent uniforms, then map them through Box-Muller. */
	double u1 = randu(seed, index);
	double u2 = randu(seed, index);
	double radius = sqrt(-2 * log(u1));
	return radius * cos(2 * PI * u2);
}
/**
 * A simple main that demonstrates how to set up the seed array for use
 * by randu()/randn().
 * @return 0 on success, 1 if the seed array cannot be allocated
 */
int main(){
	//define the length of the seed array
	int length = 10000;
	//declare seed array
	int * seed = (int *)malloc(sizeof(int)*length);
	if (seed == NULL)
	{
		//malloc can fail; report instead of writing through a NULL pointer
		fprintf(stderr, "failed to allocate seed array\n");
		return 1;
	}
	//establish original values:
	//one timestamp * the index is good enough for most uses
	//(the clock is queried once, outside the loop, rather than per element)
	time_t now = time(0);
	int x;
	for(x = 0; x < length; x++)
	{
		seed[x] = now*x;
	}
	//make kernel calls etc; device functions can now use seed array to generate normal and uniform random numbers
	/* Example
	#pragma omp parallel for shared(arrayX, arrayY, length, seed) private(x)
	for(x = 0; x < length; x++){
	arrayX[x] += 1 + 5*randn(seed, x);
	arrayY[x] += -2 + 2*randn(seed, x);
	}
	*/
	//free allocated memory
	free(seed);
	return 0;
}
GB_unop__identity_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// Macro configuration for the kernels below: the IDENTITY unary operator
// applied to int64_t input, typecast to float output.
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_int64)
// op(A') function: GB (_unop_tran__identity_fp32_int64)
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator (identity: z = x)
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
// (0 here: int64_t -> float requires a cast, so no memcpy fast path)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Elementwise Cx [p] = (float) Ax [p]. Two paths: full/sparse (Ab == NULL,
// every entry present) and bitmap (Ab marks which positions hold entries).
GrB_Info GB (_unop_apply__identity_fp32_int64)
(
float *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identical types: a bulk memcpy suffices (compiled out here)
GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// All work is done by the included template, specialized by the macros above.
GrB_Info GB (_unop_tran__identity_fp32_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
VolumetricMaxUnpooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricMaxUnpooling.c"
#else
/*
 * Scatter one (non-batched) input frame into the zero-initialized output:
 * for each input cell, the stored index triple (z, y, x packed as three
 * bytes inside the real value) locates where the original max came from,
 * and the input value is written there. Parallelized over feature slices.
 */
static void nn_(VolumetricMaxUnpooling_updateOutput_frame)(real *input_p, real *output_p,
real *ind_p,
long nslices,
long itime, long iwidth, long iheight,
long otime, long owidth, long oheight,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
long k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
long ti, i, j, maxz, maxy, maxx;
for(ti = 0; ti < itime; ti++)
{
for(i = 0; i < iheight; i++)
{
for(j = 0; j < iwidth; j++)
{
/* top-left-front corner of this pooling window in the output */
long start_t = ti * dT - padT;
long start_h = i * dH - padH;
long start_w = j * dW - padW;
//real *output_p_k = output_p + k*otime*owidth*oheight + ti*owidth*oheight*dT + i*owidth*dH + j*dW;
real *input_p_k = input_p + k*itime*iwidth*iheight + ti*iwidth*iheight + i*iwidth + j;
real *ind_p_k = ind_p + k*itime*iwidth*iheight + ti*iwidth*iheight + i*iwidth + j;
maxz = ((unsigned char*)(ind_p_k))[0]; /* retrieve position of max */
maxy = ((unsigned char*)(ind_p_k))[1];
maxx = ((unsigned char*)(ind_p_k))[2];
if(start_t+maxz<0 || start_h+maxy<0 || start_w+maxx<0 || start_t+maxz>=otime || start_h+maxy>=oheight || start_w+maxx>=owidth)
{
/* all six arguments are long, so use %ld (%d with a long is undefined behavior on LP64) */
THError("invalid max index z= %ld, y= %ld, x= %ld, otime= %ld, owidth= %ld, oheight= %ld", start_t+maxz, start_h+maxy, start_w+maxx, otime, owidth, oheight);
}
output_p[k*otime*owidth*oheight + oheight*owidth*(start_t+maxz) + owidth*(start_h+maxy) + (start_w+maxx)] = *input_p_k; /* update output */
}
}
}
}
}
// Lua entry point for the forward pass: validates shapes, resizes/zeros the
// output tensor, and dispatches to the frame worker (per batch element in
// the 5D case). Returns 1 (the output tensor is left on the Lua stack state).
static int nn_(VolumetricMaxUnpooling_updateOutput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
int otime = luaT_getfieldcheckint(L, 1, "otime");
int owidth = luaT_getfieldcheckint(L, 1, "owidth");
int oheight = luaT_getfieldcheckint(L, 1, "oheight");
int dT = luaT_getfieldcheckint(L, 1, "dT");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int padT = luaT_getfieldcheckint(L, 1, "padT");
int padH = luaT_getfieldcheckint(L, 1, "padH");
int padW = luaT_getfieldcheckint(L, 1, "padW");
// dimension offsets; bumped by one in batch (5D) mode
int dimw = 3;
int dimh = 2;
int dimt = 1;
int nbatch = 1;
int nslices;
int itime;
int iheight;
int iwidth;
real *input_data;
real *output_data;
real *indices_data;
luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5 , 2, "4D or 5D (batch mode) tensor expected");
// indices were produced by the paired max-pooling and must match input
if (!THTensor_(isSameSizeAs)(input, indices)){
THError("Invalid input size w.r.t current indices size");
}
if (input->nDimension == 5)
{
nbatch = input->size[0];
dimt++;
dimw++;
dimh++;
}
/* sizes */
nslices = input->size[dimt-1];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
/* get contiguous input */
input = THTensor_(newContiguous)(input);
indices = THTensor_(newContiguous)(indices);
/* resize output */
if (input->nDimension == 4)
{
THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
THTensor_(zero)(output);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THTensor_(data)(indices);
nn_(VolumetricMaxUnpooling_updateOutput_frame)(input_data, output_data,
indices_data,
nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
dT, dW, dH, padT, padW, padH);
}
else
{
long p;
THTensor_(resize5d)(output, nbatch, nslices, otime, oheight, owidth);
THTensor_(zero)(output);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
indices_data = THTensor_(data)(indices);
// one worker call per batch element, offset into the flat data arrays
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
nn_(VolumetricMaxUnpooling_updateOutput_frame)(input_data+p*nslices*itime*iwidth*iheight, output_data+p*nslices*otime*owidth*oheight,
indices_data+p*nslices*itime*iwidth*iheight,
nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
dT, dW, dH, padT, padW, padH);
}
}
/* cleanup: release the newContiguous copies (refcounted) */
THTensor_(free)(input);
THTensor_(free)(indices);
return 1;
}
/*
 * Backward worker for one (non-batched) frame: each input-gradient cell
 * gathers the output-gradient value from the position its max originally
 * came from (decoded from the packed byte triple in ind_p).
 * Parallelized over feature slices.
 */
static void nn_(VolumetricMaxUnpooling_updateGradInput_frame)(real *gradInput_p, real *gradOutput_p,
real *ind_p,
long nslices,
long itime, long iwidth, long iheight,
long otime, long owidth, long oheight,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
long k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
long ti, i, j, maxz, maxy, maxx;
for(ti = 0; ti < itime; ti++)
{
for(i = 0; i < iheight; i++)
{
for(j = 0; j < iwidth; j++)
{
/* top-left-front corner of this pooling window in the output */
long start_t = ti * dT - padT;
long start_h = i * dH - padH;
long start_w = j * dW - padW;
real *gradInput_p_k = gradInput_p + k*itime*iwidth*iheight + ti*iwidth*iheight + i*iwidth + j;
//real *gradOutput_p_k = gradOutput_p + k*otime*owidth*oheight + ti*owidth*oheight*dT + i*owidth*dH + j*dW;
real *ind_p_k = ind_p + k*itime*iwidth*iheight + ti*iwidth*iheight + i*iwidth + j;
maxz = ((unsigned char*)(ind_p_k))[0]; /* retrieve position of max */
maxy = ((unsigned char*)(ind_p_k))[1];
maxx = ((unsigned char*)(ind_p_k))[2];
if(start_t+maxz<0 || start_h+maxy<0 || start_w+maxx<0 || start_t+maxz>=otime || start_h+maxy>=oheight || start_w+maxx>=owidth)
{
/* all six arguments are long, so use %ld (%d with a long is undefined behavior on LP64) */
THError("invalid max index z= %ld, y= %ld, x= %ld, otime= %ld, owidth= %ld, oheight= %ld", start_t+maxz, start_h+maxy, start_w+maxx, otime, owidth, oheight);
}
*gradInput_p_k = gradOutput_p[k*otime*owidth*oheight + oheight*owidth*(start_t+maxz) + owidth*(start_h+maxy) + (start_w+maxx)]; /* update gradient */
}
}
}
}
}
// Lua entry point for the backward pass: validates shapes, resizes/zeros
// gradInput, and dispatches to the gradient frame worker (per batch element
// in the 5D case). Returns 1.
static int nn_(VolumetricMaxUnpooling_updateGradInput)(lua_State *L)
{
THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
int otime = luaT_getfieldcheckint(L, 1, "otime");
int owidth = luaT_getfieldcheckint(L, 1, "owidth");
int oheight = luaT_getfieldcheckint(L, 1, "oheight");
int dT = luaT_getfieldcheckint(L, 1, "dT");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int padT = luaT_getfieldcheckint(L, 1, "padT");
int padH = luaT_getfieldcheckint(L, 1, "padH");
int padW = luaT_getfieldcheckint(L, 1, "padW");
// dimension offsets; bumped by one in batch (5D) mode
int dimw = 3;
int dimh = 2;
int dimt = 1;
int nbatch = 1;
int nslices;
int itime;
int iheight;
int iwidth;
real *gradInput_data;
real *gradOutput_data;
real *indices_data;
if (!THTensor_(isSameSizeAs)(input, indices)){
THError("Invalid input size w.r.t current indices size");
}
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
indices = THTensor_(newContiguous)(indices);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
if (input->nDimension == 5) {
nbatch = input->size[0];
dimt++;
dimw++;
dimh++;
}
/* sizes */
nslices = input->size[dimt-1];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
if(otime!=gradOutput->size[dimt] || owidth!=gradOutput->size[dimw] || oheight!=gradOutput->size[dimh]){
/* tensor sizes are long, so the last two specifiers must be %ld, not %d */
THError("Inconsistent gradOutput size. otime= %d, oheight= %d, owidth= %d, gradOutput: %ldx%ld", otime, oheight, owidth,gradOutput->size[dimh],gradOutput->size[dimw]);
}
/* get raw pointers */
gradInput_data = THTensor_(data)(gradInput);
gradOutput_data = THTensor_(data)(gradOutput);
indices_data = THTensor_(data)(indices);
/* backprop */
if (input->nDimension == 4)
{
nn_(VolumetricMaxUnpooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
indices_data,
nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
dT, dW, dH,
padT, padW, padH);
}
else
{
long p;
// one worker call per batch element, offset into the flat data arrays
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
nn_(VolumetricMaxUnpooling_updateGradInput_frame)(gradInput_data+p*nslices*itime*iwidth*iheight, gradOutput_data+p*nslices*otime*owidth*oheight,
indices_data+p*nslices*itime*iwidth*iheight,
nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
dT, dW, dH,
padT, padW, padH);
}
}
/* cleanup: release the newContiguous copies (refcounted) */
THTensor_(free)(gradOutput);
THTensor_(free)(indices);
return 1;
}
/* Lua method table: maps exported names to the C implementations above;
   registered on the Tensor metatable under "nn" by the init function. */
static const struct luaL_Reg nn_(VolumetricMaxUnpooling__) [] = {
{"VolumetricMaxUnpooling_updateOutput", nn_(VolumetricMaxUnpooling_updateOutput)},
{"VolumetricMaxUnpooling_updateGradInput", nn_(VolumetricMaxUnpooling_updateGradInput)},
{NULL, NULL}
};
/* Registers the VolumetricMaxUnpooling methods in the "nn" namespace of
   the current Tensor metatable, leaving the Lua stack balanced. */
static void nn_(VolumetricMaxUnpooling_init)(lua_State *L)
{
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, nn_(VolumetricMaxUnpooling__), "nn");
lua_pop(L,1);
}
#endif
|
util.h | #pragma once
#include <dirent.h>
#include <sys/stat.h>
#include <omp.h>
#include "type.h"
#include "new_func.h"
#include "wtime.h"
// Size in bytes of the file at `fname`; -1 (after a perror report)
// when stat() fails.
off_t fsize(const string& fname)
{
    struct stat st;
    if (stat(fname.c_str(), &st) != 0) {
        perror("stat issue");
        return -1L;
    }
    return st.st_size;
}
// Size in bytes of the already-open descriptor `fd`; -1 (after a
// perror report) when fstat() fails.
off_t fsize(int fd)
{
    struct stat st;
    if (fstat(fd, &st) != 0) {
        perror("stat issue");
        return -1L;
    }
    return st.st_size;
}
off_t fsize_dir(const string& idir)
{
struct dirent *ptr;
DIR *dir;
string filename;
index_t size = 0;
index_t total_size = 0;
//allocate accuately
dir = opendir(idir.c_str());
while (NULL != (ptr = readdir(dir))) {
if (ptr->d_name[0] == '.') continue;
filename = idir + "/" + string(ptr->d_name);
size = fsize(filename);
total_size += size;
}
closedir(dir);
return total_size;
}
// Pins the calling thread to the single CPU core `coreID`.
// Failure is non-fatal: a message is printed and 0 is still returned.
inline short CorePin(int coreID)
{
    int s;
    cpu_set_t cpuset;
    pthread_t thread = pthread_self();
    /* Affinity mask containing only coreID (the old comment claiming
       "CPUs 0 to 7" described dead, commented-out code) */
    CPU_ZERO(&cpuset);
    CPU_SET(coreID, &cpuset);
    s = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
    if (s != 0) {
        cout << "failed to set the core" << endl;
    }
    return 0;
}
// Rounds v up to the next power of two (values that are already a
// power of two are returned unchanged).  Classic bit-smearing trick:
// propagate the top set bit into every lower position, then add one.
inline index_t upper_power_of_two(index_t v)
{
    v--;
    for (unsigned s = 1; s <= 32; s <<= 1) {
        v |= v >> s;
    }
    v++;
    return v;
}
//You must be sure that it is a perfect power
// log2 via count-trailing-zeros: exact only when e is a power of two,
// and undefined for e == 0 (__builtin_ctzll's own precondition).
inline int ilog2(index_t e)
{
return __builtin_ctzll(e);
}
// Allocates a single buffer big enough to hold every file under
// `idirname`; *buf receives the allocation (NULL on failure, now
// reported instead of silently propagated) and the total byte size is
// returned.  NOTE(review): the `alloc` flag is currently ignored --
// kept only for interface compatibility; confirm against callers.
inline
index_t alloc_mem_dir(const string& idirname, char** buf, bool alloc)
{
    (void)alloc;  // unused, see note above
    index_t total_size = fsize_dir(idirname);
    void* local_buf = malloc(total_size);
    if (NULL == local_buf) {
        cout << "malloc failed while reading input dir" << endl;
    }
    *buf = (char*)local_buf;
    return total_size;
}
// Reads the whole of `filename` into the caller-provided buffer
// `edges` and returns the byte count.  Aborts (assert) if the file
// cannot be opened or the read comes up short.  Fix: the FILE* is now
// closed -- the original leaked one stream per call.
inline
index_t read_text_file(const string& filename, char* edges)
{
    FILE* file = fopen(filename.c_str(), "rb");
    assert(file != 0);
    index_t size = fsize(filename);
    if (size != fread(edges, sizeof(char), size, file)) {
        assert(0);
    }
    fclose(file);
    return size;
}
//------- The APIs to use by higher level function -------//
// Reads every non-dot file under `idirname` into one freshly
// malloc'ed buffer (returned through `edges`; ownership passes to the
// caller) and returns the total bytes read.  Fixes: each FILE* is now
// fclose'd (the loop used to leak one descriptor per file) and an
// unopenable directory is reported instead of crashing readdir(NULL).
inline
index_t read_text_dir(const string& idirname, char*& edges)
{
    index_t dir_size = fsize_dir(idirname);
    edges = (char*)malloc(dir_size);
    //Read graph files
    struct dirent *ptr;
    FILE* file = 0;
    int file_count = 0;
    string filename;
    index_t size = 0;
    index_t total_size = 0;
    char* edge;
    double start = mywtime();
    DIR* dir = opendir(idirname.c_str());
    if (NULL == dir) {
        perror("opendir issue");
        return 0;
    }
    while (NULL != (ptr = readdir(dir))) {
        if (ptr->d_name[0] == '.') continue;
        filename = idirname + "/" + string(ptr->d_name);
        file_count++;
        file = fopen(filename.c_str(), "rb");
        assert(file != 0);
        size = fsize(filename);
        edge = edges + total_size;
        if (size != fread(edge, sizeof(char), size, file)) {
            assert(0);
        }
        fclose(file);  // was leaked once per file
        total_size += size;
    }
    closedir(dir);
    double end = mywtime();
    cout << " Reading " << file_count << " file time = " << end - start << endl;
    return total_size;
}
// Collects the file names under `idirname`, then lets this rank's
// OpenMP team parse its contiguous share of them into `ubatch` via
// `parse_and_insert`, returning the total number of edges logged.
// Fix: the `ifiles` array is now delete[]d (it used to leak).
template <class T>
index_t read_idir_text(const string& idirname, ubatch_t* ubatch,
    typename callback<T>::parse_fn_t parse_and_insert, int64_t flags = 0)
{
    struct dirent *ptr;
    DIR *dir;
    int file_count = 0;
    string filename;
    //count the files
    dir = opendir(idirname.c_str());
    assert(dir != NULL);
    while (NULL != (ptr = readdir(dir))) {
        if (ptr->d_name[0] == '.') continue;
        file_count++;
    }
    closedir(dir);
    assert(file_count != 0);
    string* ifiles = new string[file_count];
    int icount = 0;
    //Read graph files:
    dir = opendir(idirname.c_str());
    assert(dir != NULL);
    while (NULL != (ptr = readdir(dir))) {
        if (ptr->d_name[0] == '.') continue;
        filename = idirname + "/" + string(ptr->d_name);
        ifiles[icount++] = filename;
    }
    closedir(dir);
    index_t edge_count = 0;
    // Each rank takes a contiguous slice; the last rank absorbs the
    // remainder of icount/_numlogs.
    int portion = icount/_numlogs;
    int my_start = _rank*portion;
    int my_end = my_start + portion;
    if (_rank == _numlogs - 1) my_end = icount;
    double start = mywtime();
    #pragma omp parallel num_threads(_num_sources) reduction(+:edge_count)
    {
        #pragma omp for schedule (static)
        for (int i = my_start; i < my_end; ++i) {
            edge_count += parse_and_insert(ifiles[i], ubatch, flags);
        }
    }
    double end = mywtime();
    cout <<" Logging Time from Files = "<< end - start << endl;
    delete [] ifiles;  // was leaked
    return edge_count;
}
/*
template <class T>
index_t read_idir_text2(const string& idirname, ubatch_t* ubatch,
typename callback<T>::parse_fn2_t parse_and_insert, int64_t flags = 0)
{
struct dirent *ptr;
DIR *dir;
int file_count = 0;
string filename;
string ofilename;
//count the files
dir = opendir(idirname.c_str());
while (NULL != (ptr = readdir(dir))) {
if (ptr->d_name[0] == '.') continue;
file_count++;
}
closedir(dir);
assert(file_count !=0);
string* ifiles = new string[file_count];
int icount = 0;
//Read graph files
dir = opendir(idirname.c_str());
while (NULL != (ptr = readdir(dir))) {
if (ptr->d_name[0] == '.') continue;
filename = idirname + "/" + string(ptr->d_name);
//cout << "ifile= " << filename << endl ;
ifiles[icount++] = filename;
}
closedir(dir);
index_t edge_count = 0;
int portion = icount/_numlogs;
int my_start = _rank*portion;
int my_end = my_start + portion;
if (_rank == _numlogs - 1) my_end = icount;
//cout << my_start << ":" << my_end << ":" << icount << endl;
index_t line = 0;
#pragma omp parallel num_threads(_num_sources) reduction(+:line)
{
index_t total_size = 0;
#pragma omp for schedule(static)
for (int i = my_start; i < my_end; ++i) {
total_size += fsize(ifiles[i]);
}
char* buf = (char*)malloc(total_size);
index_t size = 0;
#pragma omp for schedule (static)
for (int i = my_start; i < my_end; ++i) {
size += read_text_file(ifiles[i], buf + size);
}
//Now the batching starts
double start = mywtime();
line += parse_and_insert(buf, ubatch, total_size, flags);
double end = mywtime();
cout << "Logging time from in-memory data-source = " << end - start << endl;
}
return line;
}
template <class T>
index_t read_bin_dir(const string& idirname, edgeT_t<T>* edges)
{
//Read graph files
struct dirent *ptr;
FILE* file = 0;
int file_count = 0;
string filename;
index_t size = 0;
index_t edge_count = 0;
index_t total_edge_count = 0;
edgeT_t<T>* edge;
double start = mywtime();
DIR* dir = opendir(idirname.c_str());
while (NULL != (ptr = readdir(dir))) {
if (ptr->d_name[0] == '.') continue;
filename = idirname + "/" + string(ptr->d_name);
file_count++;
file = fopen((idirname + "/" + string(ptr->d_name)).c_str(), "rb");
assert(file != 0);
size = fsize(filename);
edge_count = size/sizeof(edgeT_t<T>);
edge = edges + total_edge_count;
if (edge_count != fread(edge, sizeof(edgeT_t<T>), edge_count, file)) {
assert(0);
}
total_edge_count += edge_count;
}
closedir(dir);
double end = mywtime();
cout << " Reading " << file_count << " file time = " << end - start << endl;
//cout << "Total Edge Count = " << total_edge_count << endl;
return total_edge_count;
}
*/
// Loads a directory of binary edge records into one buffer (returned
// through *pedges, caller frees) and returns the edge count.
// Fix: read_text_dir() allocates the buffer itself, so the old
// pre-allocation through alloc_mem_dir() leaked one directory-sized
// buffer per call; it has been dropped.  The byte total read back is
// the same value the old fsize_dir-based computation produced.
// NOTE(review): `alloc` was already ignored -- kept for compatibility.
template <class T>
index_t read_idir(const string& idirname, edgeT_t<T>** pedges, bool alloc)
{
    (void)alloc;
    char* buf = 0;
    index_t total_size = read_text_dir(idirname, buf);
    *pedges = (edgeT_t<T>*)buf;
    return total_size/sizeof(edgeT_t<T>);
}
// Pins the caller to core 0, batches every edge found under
// `idirname` into `ubatch` (text or binary source, chosen by flags),
// and reports elapsed time plus the number of edges.
template <class T>
index_t add_edges_from_dir(const string& idirname, ubatch_t* ubatch, int64_t flags)
{
    CorePin(0);
    //Batch Graph
    double start = mywtime();
    index_t edge_count = 0;
    if (IS_SOURCE_BINARY(flags)) {
        //binary data source
        edge_count = read_idir_text<T>(idirname, ubatch, file_and_insert<T>, flags);
    } else {
        //plain-text data source
        edge_count = read_idir_text<T>(idirname, ubatch, parsefile_and_insert<T>, flags);
    }
    double end = mywtime();
    cout << "Batch Update Time (File) = " << end - start
         << " Edge_count = " << edge_count << endl;
    return edge_count;
}
|
deltapq_create_approx_tree.h |
#include "pq.h"
#include "create_tree.h"
#include <algorithm>
#include <pthread.h>
#include <sys/time.h>
#include <unistd.h>
#include <cmath>
#include <vector>
#include <string>
#include <fstream>
#include <string.h>
#include <iostream>
#include <assert.h>
#include <algorithm>
#include <inttypes.h>
#include <sys/time.h>
#include <time.h>
#include <unordered_map>
#include <unordered_set>
#include <omp.h>
#include <parallel/algorithm>
#include <bitset>
#include <fcntl.h> // for direct I/O
#include <errno.h>
extern int PQ_M;
extern int PQ_K;
extern int with_id;
extern string ext; // vector type, fvecs or bvecs
extern int dim; // dimension default is 128
#define BLKNBS 4096
#define BYTELEN 8
#define BLKNBITS BLKNBS*BYTELEN
#define NUM_DIM 8 // M
//#define NUM_DIFF 3
#define NUM_DIFF 8
#define EPSILON 0.000001
#define EPS 0.000001
long long int global_diff_sum = 0;
// Flat worklist of vector ids awaiting placement in the tree.
// `size` mirrors ids.size() so callers can read it cheaply.
struct DummyNodes {
    int m = 8;         // number of PQ sub-quantizers (M)
    uint size = 0;     // element count, kept in lock-step with ids
    vector<uint> ids;  // vector ids
    void push_back(uint node_id) {
        ids.push_back(node_id);
        ++size;
    }
    // Reset to empty with a new sub-quantizer count.
    void init(int M) {
        m = M;
        ids.clear();
        size = 0;
    }
    // Fill with the identity mapping 0..num_codes-1.
    void init_nodes(const uchar* codes, long long num_codes, int M) {
        m = M;
        ids.resize(num_codes);
        for (long long i = 0; i < num_codes; ++i) {
            ids[i] = i;
        }
        size = num_codes;
    }
    void clear() {
        ids.clear();
        size = 0;
    }
};
// Shared query-time state (set up elsewhere before the search runs).
vector<PQ::Array> global_m_codewords;   // PQ codebooks, one per subspace
int global_m_Ds;                        // sub-vector dimensionality
vector<float> global_query;             // current query vector
float** m_sub_distances=NULL;           // per-subspace query->centroid distances
float*** batch_m_sub_distances=NULL;    // same, for a batch of queries
float** mkk_tables;                     // M tables of K x K centroid distances
const uchar* pqcodes;                   // raw PQ codes of the dataset
int n_queries=0;                        // number of queries in the batch
struct QNode;
// Query Node
// One node of the approximate PQ tree.  NOTE(review): instances are
// written/read as raw bytes (sizeof(QNode) blocks) by the cache-file
// code, so the member order and types must not change.
struct QNode {
uint vec_id;            // id of the PQ code this node represents
uint parent_pos;        // array position of the parent node
uint child_pos_start;   // array position of the first child
uint child_num;         // number of children
uint sub_tree_size=1; // including itself
float qdist = 0.0;      // distance to the current query
float max_dist=0;       // max distance within the subtree
float max_dist2p=0; // maximum distance to its parent
uchar diff_num;         // how many entries of `diffs` are used
uchar depth;            // depth in the tree
array<Diff, NUM_DIFF> diffs;  // code differences w.r.t. the parent
QNode(uint id, uint pid) : vec_id(id), parent_pos(pid), diff_num(0) { }
QNode() : diff_num(0) {}
// set up vector id and parent position
void set_id_parent_pos(uint id, uint p_pos) {
vec_id = id;
parent_pos = p_pos;
}
};
// Compressed codes laid out in DFS order of the tree.
uchar* comprsd_dfs_codes;
// Per-code flag: still eligible for merging during tree construction.
vector<bool> is_active;
// for building trees
//void read_tree_index_file_approx(const string &file_name, QNode* nodes);
bool create_approx_tree(const string &dataset_path, const uchar* codes,
vector<uchar>& dummycodes, vector<bool>& dummymarks,
int M, int K, uint num_codes, int diff_argument,
int PART_NUM,
QNode* nodes, vector<uint> &root_num_array,
float** m_sub_distance,
float** dist_tables, int max_height_folds,
int method = 1);
void find_edges_by_diff_approx(const string &dataset_path,
const uchar* codes, vector<uchar>& dummycodes,
vector<bool>& dummymarks, int M, int K,
uint num_codes,uint& global_num_dummy,int diff_argument,
vector<pair<uint, uint>>& edges, float** dist_tables,
uint& root_id, int max_height_folds,
int method = 1);
void partition_linear_opt_approx_with_constraint(const uchar* codes,
vector<uchar>& dummycodes, vector<bool>& dummymarks,
int M, int K, uint num_codes,
uint& global_num_dummy, int DIFF_BY, uint* parents,
uint* rank, vector<pair<uint, uint>>& edges,
DummyNodes& dummy_nodes, DummyNodes& next,
DummyNodes& finalists, uint& root_id,
vector<uchar>& heights, int MAX_HEIGHT);
void partition_linear_opt_approx_with_constraint_WOH(const uchar* codes,
vector<uchar>& dummycodes, vector<bool>& dummymarks,
int M, int K, uint num_codes,
uint& global_num_dummy, int DIFF_BY, uint* parents,
uint* rank, vector<pair<uint, uint>>& edges,
DummyNodes& dummy_nodes, DummyNodes& next,
DummyNodes& finalists, uint& root_id,
vector<uchar>& heights, int MAX_HEIGHT);
void partition_linear_opt_approx_clique_with_constraint_on_size(
const uchar* codes,
vector<uchar>& dummycodes, vector<bool>& dummymarks,
int M, int K, int nparts, uint num_codes,
uint& global_num_dummy, int DIFF_BY, uint* parents,
uint* rank, vector<pair<uint, uint>>& edges,
DummyNodes& dummy_nodes, DummyNodes& next,uint& root_id,
vector<uchar>& heights, int MAX_HEIGHT,
int min_clique_size);
void edges_to_tree_index_approx_dfs_layout(const string &dataset_path,
const uchar* codes, vector<uchar>& dummycodes,
vector<bool>& dummymarks, int M, int K,
uint num_codes, uint global_num_dummy,
vector<pair<uint,uint>>& edges,
QNode* nodes, uint& root_id, int method);
bool qnodes_to_compressed_codes(const string &dataset_path, const uchar* codes,
int M, int K, uint& num_codes, int diff_argument,
long long n_diffs,
QNode* nodes, float** m_sub_distances_,
float** dist_tables, int method);
bool qnodes_to_compressed_codes_opt(const string &dataset_path, const uchar* codes,
int M, int K, uint& num_codes, int diff_argument,
long long n_diffs,
QNode* nodes, float** m_sub_distances_,
float** dist_tables, int method);
bool qnodes_to_compressed_codes_opt_block_aware(const string &dataset_path, const uchar* codes,
int M, int K, uint& num_codes, int diff_argument,
long long n_diffs,
QNode* nodes, float** m_sub_distances_,
float** dist_tables);
bool row_store_qnodes_to_compressed_codes_opt(const string &dataset_path, const uchar* codes,
int M, int K, uint& num_codes, int diff_argument,
long long n_diffs,
QNode* nodes, float** m_sub_distances_,
float** dist_tables, int method);
// for checking clique information
void check_clique_info(const uchar* codes, int M, int K, uint num_codes,
int DIFF_BY, uint* parents, uint* rank,
vector<pair<uint, uint>>& edges, float** dist_tables);
void dfs(QNode* nodes, uint node_id, vector<pair<int, float>>& results,
uint& res_id, float dist, int depth);
void dfs(QNode* nodes, uint node_pos, vector<pair<int, float>>& results,
uint& res_idx, float dist);
float cal_dist_from_query(long long id, int PQ_M, const uchar* vecs,
float** m_sub_distances);
// Symmetric distance between PQ codes `a` and `b`: sum over the M
// subspaces of the precomputed centroid-to-centroid distance
// (dist_tables[m] is a K x K table; m_Ks == PQ_K).
float cal_distance_by_tables(uint a, uint b, float** dist_tables, const uchar* vecs, uint m_Ks) {
    float total = 0;
    const long long base_a = (long long)a * PQ_M;
    const long long base_b = (long long)b * PQ_M;
    for (int sub = 0; sub < PQ_M; ++sub) {
        int code_a = (int) vecs[base_a + sub];
        int code_b = (int) vecs[base_b + sub];
        total += dist_tables[sub][code_a * m_Ks + code_b];
    }
    return total;
}
// Debug helper: prints the codes of the last 10 vectors, then counts
// (and returns) the total number of differing sub-codes across all
// tree edges.  `info_n` gates the per-edge trace output (0 = silent).
// NOTE(review): the preview loop indexes with PQ_M while the diff
// loop uses the M parameter -- presumably always equal; confirm.
long long check_num_diffs(const uchar* codes, int M, int K, int num_codes, vector<pair<uint, uint>>& edges) {
for (long long i = 0; i < 10; i ++) {
for (int m = 0; m < M; m ++) {
if (K <= 256) {
cout << (int)codes[(num_codes-1-i)*PQ_M+m] << " ";
} else {
//cout << (int)((uint16_t*)vecs)[(N-1-i)*PQ_M+m] << " ";
cout << ((uint16_t*)codes)[GetIndex(PQ_M, num_codes - 1- i, m)] << " ";
}
}
cout << endl;
}
// check total number of diffs
long long n_diffs = 0;
int info_n=0;
for (long long i = 0; i < edges.size(); i ++) {
long long id_a = edges[i].first;
long long id_b = edges[i].second;
if (i < info_n) {
cout << "ida " << id_a << " idb " << id_b << endl;
}
// K > 256 means sub-codes are stored as 16-bit values
for (int m = 0; m < M; m ++) {
uint from;
uint to;
if (K > 256) {
from = ((uint16_t*)codes)[GetIndex(M, id_a, m)];
to = ((uint16_t*)codes)[GetIndex(M, id_b, m)];
} else {
from = codes[GetIndex(M, id_a, m)];
to = codes[GetIndex(M, id_b, m)];
}
if (from != to) {
if (i < info_n) cout << "(" << from << ", " << to <<") ";
n_diffs++;
}
}
if (i < info_n) cout << n_diffs << endl;
}
cout << "TOTAL number of diffs is " << n_diffs << endl;
cout << "PQ_M = " << PQ_M << " K " << K << endl;
return n_diffs;
}
void read_qnodes_from_file(const string &dataset_path, const uchar* codes,
int M, int K, uint num_codes, int diff_argument,
QNode* nodes, int method) {
// Get QNodes first
string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
+ "_Approx_TreeNodesDFS";
if (with_id) file_name = file_name + "_with_id";
switch (method) {
case 1:
break;
case 2:
file_name = file_name + "_WOH";
break;
case 3:
file_name = file_name + "_clique";
break;
}
file_name = file_name + "_N" + to_string(num_codes);
if (exists_test3(file_name)) {
ifstream ifs(file_name, ios::binary);
if (!ifs.is_open()) {
cerr << "Error: cannot open QNodes from " << file_name << ends;
assert(0);
}
// load tree from file
ifs.read( (char*) &(nodes[0]), sizeof(QNode) * (num_codes+1));
cout << "Nodes are read from file " << file_name << endl;
}
}
/*
void partition_linear_opt_approx_with_constraint_bitset(const uchar* codes,
vector<uchar>& dummycodes, vector<bool>& dummymarks,
int M, int K, uint num_codes,
uint& global_num_dummy, int DIFF_BY, uint* parents,
uint* rank, vector<pair<uint, uint>>& edges,
vector<bool>& is_connected, uint& root_id,
vector<uchar>& heights, int MAX_HEIGHT)
{
int LOG_K = round(log2(K));
timeval beg, mid, mid1, end, all_st, all_en;
gettimeofday(&all_st, NULL);
cout << "Find diff = " << DIFF_BY << endl;
vector<vector<int>> combinations;
cout << " DIFF_BY = " << DIFF_BY << endl;
int nparts = M;
assert(nparts >= DIFF_BY);
nchoosek(nparts, nparts-DIFF_BY, combinations);
cout << "For loop begins " << get_current_time_str() << endl;
vector<pair<uint128_t, uint>> hash_array;
cout << combinations.size() << " combination(s)" << endl;
long long debug_clique_size_sum = 0;
long long debug_diff_num_sum = 0;
for (auto k = 0; k < combinations.size(); k++) {
hash_array.resize(0);
//cout << k << " th combination" << endl ;
//if (num_codes >= 1000000000) cout << k << " " ;
gettimeofday(&beg, NULL);
//cout << "All node size " << dummy_nodes.size << endl;
//cout << "global num dummy " << global_num_dummy << endl;
for (long long l = 0; l < num_codes; l ++) {
int sp_count = 0;
uint128_t hash = 0x0000000000000000ULL;
uint code_id = l;
if (is_connected[code_id] == true) continue;
for (auto it = combinations[k].begin(); it != combinations[k].end();
it++) {
//hash |= (static_cast<uint128_t>(codes[GetIndex(M, code_id, *it)]) << (LOG_K * (*it)));
if (nparts <= M) {
long start = (*it)*(M/nparts);
long end = (*it+1)*(M/nparts);
for (auto iter = start; iter < end; iter ++) {
int cid;
if (K > 256) {
cid = ((uint16_t*)codes)[GetIndex(M, code_id, iter)];
} else {
cid = codes[GetIndex(M, code_id, iter)];
}
hash |= (static_cast<uint128_t>(codes[GetIndex(M, code_id, iter)]) << (LOG_K * (iter)));
}
} else {
int num_part = nparts/M;
int m = (*it) / num_part;
int part = (*it) % num_part;
int val = (uchar)(codes[GetIndex(M, code_id, m)]);
int val_length = LOG_K / num_part;
val = (val >> (val_length*part)) % ((1<<val_length)-1);
//cout << (*it) << endl;
//cout << bitset<8>(val) << endl;
hash |= (static_cast<uint128_t>(val) << (val_length* (*it)));
//cout << bitset<64>((uint64_t)hash) << endl;
}
}
//exit(0);
hash_array.emplace_back(hash, code_id);// put index of dummy_nodes here
// as push_back() in DummyNode required
}
cout << k << " th combination with nodes number of " << hash_array.size() << endl ;
//gettimeofday(&mid, NULL);
//cout << " calculate hash codes " <<mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//gettimeofday(&beg, NULL);
// sort the hash codes
// Explicitly force a call to parallel sort.
//cout << "hash array size " << hash_array.size() << endl;
__gnu_parallel::sort(hash_array.begin(), hash_array.end(),
[](const pair<uint128_t, uint32_t>& a,
const pair<uint128_t, uint32_t>&b) -> bool {
return a.first < b.first;
});
//gettimeofday(&mid, NULL);
//cout << " sort codes " << mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//gettimeofday(&beg, NULL);
// traverse hash array
for (uint i = 0; i < hash_array.size(); i ++) {
uint end = i+1;
for (; end < hash_array.size(); end++) {
if (hash_array[end].first != hash_array[i].first)
break;
}
if (end == i+1) {
continue;
}
debug_clique_size_sum += end-i;
// process the clique
// find the highest node as parent
int max_height = -1;
uint parent_id = 0;
for (long long j = i; j < end; j ++) {
uint code_id = hash_array[j].second;
if ((int)(heights[code_id]) > max_height) {
max_height = heights[code_id];
parent_id = code_id;
}
}
// find the second highest node
int second_height = 0;
for (long long j = i; j < end; j ++) {
uint code_id = hash_array[j].second;
if (code_id == parent_id) continue;
if ((int)(heights[code_id]) > second_height) {
second_height = heights[code_id];
}
}
if (second_height == max_height) heights[parent_id] ++;
if (max_height >= MAX_HEIGHT-1) {
is_connected[parent_id] = true;
}
root_id = parent_id;
for (long long j = i; j < end; j ++) {
uint code_id = hash_array[j].second;
if (code_id == parent_id) continue;
is_connected[code_id] = true;
// create the edges
edges.emplace_back(parent_id, code_id);
// cout << edges.size() << ". <" << parent_id << ", " << code_id << ">: ";
for (int m = 0; m < M; m ++) {
uint to = codes[GetIndex(M, code_id, m)];
uint from = codes[GetIndex(M, parent_id, m)];
if (from != to)
{
debug_diff_num_sum++;
// cout << "(" << from << ", " << to << ") ";
}
}
// cout << endl;
}
i = end - 1;
}
//cout << "sum of clique size is " << debug_clique_size_sum << endl;
//gettimeofday(&mid, NULL);
//cout << " combination used " <<mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//double vm, rss;
//process_mem_usage(vm, rss);
//cout << " partition linear opt approx VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
}
cout << "number of edges "<< edges.size() << endl;
cout << "number of diffs "<< debug_diff_num_sum << endl;
global_diff_sum += debug_diff_num_sum;
cout << "GLOBAL number of diffs "<< global_diff_sum << endl;
double vm, rss;
process_mem_usage(vm, rss);
cout << "find edge VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
gettimeofday(&all_en, NULL);
cout << " Find Edge uses: "
<< all_en.tv_sec - all_st.tv_sec + (all_en.tv_usec-all_st.tv_usec)/1e6
<< "sec" <<endl;
if (DIFF_BY == M) {
cout << " TOTAL number of Diffs " << global_diff_sum << endl;
}
}
*/
// One clique-merging round of approximate-tree construction.  For
// every combination of (nparts - DIFF_BY) subspaces, nodes whose
// codes agree on those subspaces hash identically; each resulting
// clique elects its tallest member as parent of the rest.  Merged
// children are deactivated, parents that approach MAX_HEIGHT go to
// `finalists`, and every node still unmerged after all combinations
// is pushed into `next` for the following DIFF_BY round.  Parent ->
// child edges are appended to `edges`; `root_id` ends up as the last
// elected parent.
void partition_linear_opt_approx_with_constraint(const uchar* codes,
vector<uchar>& dummycodes, vector<bool>& dummymarks,
int M, int K, uint num_codes,
uint& global_num_dummy, int DIFF_BY, uint* parents,
uint* rank, vector<pair<uint, uint>>& edges,
DummyNodes& dummy_nodes, DummyNodes& next,
DummyNodes& finalists, uint& root_id,
vector<uchar>& heights, int MAX_HEIGHT)
{
int LOG_K = round(log2(K));
timeval beg, mid, mid1, end, all_st, all_en;
gettimeofday(&all_st, NULL);
cout << "Find diff = " << DIFF_BY << endl;
vector<vector<int>> combinations;
/** original method
assert(M >= DIFF_BY);
nchoosek(M, M - DIFF_BY, combinations);
*/
cout << " DIFF_BY = " << DIFF_BY << endl;
int nparts = M;
assert(nparts >= DIFF_BY);
nchoosek(nparts, nparts-DIFF_BY, combinations);
cout << "For loop begins " << get_current_time_str() << endl;
vector<pair<uint128_t, uint>> hash_array;
//hash_array.resize(dummy_nodes.size);
cout << combinations.size() << " combination(s)" << endl;
long long debug_clique_size_sum = 0;
long long debug_diff_num_sum = 0;
// mark the process nodes
vector<bool> is_merged(dummy_nodes.size, false);
vector<uint> node_ids(dummy_nodes.size,0);
for (auto k = 0; k < combinations.size(); k++) {
//if (num_codes >= 1000000000) cout << k << " " ;
gettimeofday(&beg, NULL);
long long num_active_codes = 0;
// get active code ids
for (long long l = 0; l < dummy_nodes.size; l ++) {
if (!is_merged[l]) {
node_ids[num_active_codes++] = l;
}
}
hash_array.resize(num_active_codes);
cout << k << " th combination with nodes number of " << num_active_codes << endl ;
// Hash each active node over the selected subspaces: equal hashes
// <=> codes agree on every kept subspace (differ on <= DIFF_BY).
#pragma omp parallel for
for (long long l = 0; l < num_active_codes; l ++) {
uint128_t hash = 0x0000000000000000ULL;
uint code_id = dummy_nodes.ids[node_ids[l]];
for (auto it = combinations[k].begin(); it != combinations[k].end();
it++) {
//hash |= (static_cast<uint128_t>(codes[GetIndex(M, code_id, *it)]) << (LOG_K * (*it)));
long start = (*it)*(M/nparts);
long end = (*it+1)*(M/nparts);
for (auto iter = start; iter < end; iter ++) {
int cid;
if (K > 256) {
cid = ((uint16_t*)codes)[GetIndex(M, code_id, iter)];
} else {
cid = codes[GetIndex(M, code_id, iter)];
}
hash |= (static_cast<uint128_t>(cid) << (LOG_K * (iter)));
}
}
//exit(0);
hash_array[l] = make_pair(hash, node_ids[l]);// put index of dummy_nodes here
// as push_back() in DummyNode required
}
//gettimeofday(&mid, NULL);
//cout << " calculate hash codes " <<mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//gettimeofday(&beg, NULL);
// sort the hash codes
// Explicitly force a call to parallel sort.
//cout << "hash array size " << hash_array.size() << endl;
__gnu_parallel::sort(hash_array.begin(), hash_array.end(),
[](const pair<uint128_t, uint32_t>& a,
const pair<uint128_t, uint32_t>&b) -> bool {
return a.first < b.first;
});
//gettimeofday(&mid, NULL);
//cout << " sort codes " << mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//gettimeofday(&beg, NULL);
// traverse hash array
// After sorting, each clique is a run [i, end) of equal hashes.
for (uint i = 0; i < hash_array.size(); i ++) {
uint end = i+1;
for (; end < hash_array.size(); end++) {
if (hash_array[end].first != hash_array[i].first)
break;
}
if (end == i+1) {
continue;
}
debug_clique_size_sum += end-i;
// process the clique
// find the highest node as parent
int max_height = -1;
uint parent_node_id = 0;
for (long long j = i; j < end; j ++) {
uint code_id = dummy_nodes.ids[hash_array[j].second];
if (!is_active[code_id]) {
cout << "Wrong!!" << endl;
}
if ((int)(heights[code_id]) > max_height) {
max_height = heights[code_id];
parent_node_id = hash_array[j].second;
}
}
uint parent_code_id = dummy_nodes.ids[parent_node_id];
// find the second highest node
int second_height = 0;
for (long long j = i; j < end; j ++) {
uint code_id = dummy_nodes.ids[hash_array[j].second];
if (code_id == parent_code_id) continue;
if ((int)(heights[code_id]) > second_height) {
second_height = heights[code_id];
}
}
// Adopting an equally-tall child grows the parent's subtree height.
if (second_height == max_height) heights[parent_code_id] ++;
max_height ++;
// Parents near the height cap are finalists: removed from further
// merging rounds so the tree cannot exceed MAX_HEIGHT.
if (max_height >= MAX_HEIGHT-2) {
finalists.push_back(parent_code_id);
is_merged[parent_node_id] = true;
}
root_id = dummy_nodes.ids[parent_node_id];
for (long long j = i; j < end; j ++) {
uint node_id = hash_array[j].second;
uint code_id = dummy_nodes.ids[node_id];
if (node_id == parent_node_id) continue;
is_merged[node_id] = true;
is_active[code_id] = false;
// create the edges
if (!is_active[parent_code_id]) {
cout << "Wrong " << endl;
}
edges.emplace_back(parent_code_id, code_id);
// cout << edges.size() << ". <" << parent_id << ", " << node_id << ">: ";
for (int m = 0; m < M; m ++) {
uint to = codes[GetIndex(M, code_id, m)];
uint from = codes[GetIndex(M, parent_code_id, m)];
if (from != to)
{
debug_diff_num_sum++;
}
}
// cout << endl;
}
i = end - 1;
}
//cout << "sum of clique size is " << debug_clique_size_sum << endl;
//gettimeofday(&mid, NULL);
//cout << " combination used " <<mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//double vm, rss;
//process_mem_usage(vm, rss);
//cout << " partition linear opt approx VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
}
// put nodes in "next"
for (uint i = 0; i < dummy_nodes.size; i ++) {
if (is_merged[i]) continue;
uint code_id = dummy_nodes.ids[i];
next.push_back(code_id);
}
cout << "number of edges "<< edges.size() << endl;
cout << "number of diffs "<< debug_diff_num_sum << endl;
global_diff_sum += debug_diff_num_sum;
cout << "GLOBAL number of diffs "<< global_diff_sum << endl;
double vm, rss;
process_mem_usage(vm, rss);
cout << "find edge VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
gettimeofday(&all_en, NULL);
cout << " Find Edge uses: "
<< all_en.tv_sec - all_st.tv_sec + (all_en.tv_usec-all_st.tv_usec)/1e6
<< "sec" <<endl;
}
void partition_linear_opt_approx_with_constraint_WOH(const uchar* codes,
vector<uchar>& dummycodes, vector<bool>& dummymarks,
int M, int K, uint num_codes,
uint& global_num_dummy, int DIFF_BY, uint* parents,
uint* rank, vector<pair<uint, uint>>& edges,
DummyNodes& dummy_nodes, DummyNodes& next,
DummyNodes& finalists, uint& root_id,
vector<uchar>& heights, int MAX_HEIGHT)
{
int LOG_K = round(log2(K));
timeval beg, mid, mid1, end, all_st, all_en;
gettimeofday(&all_st, NULL);
cout << "Find diff = " << DIFF_BY << endl;
vector<vector<int>> combinations;
/** original method
assert(M >= DIFF_BY);
nchoosek(M, M - DIFF_BY, combinations);
*/
cout << " DIFF_BY = " << DIFF_BY << endl;
int nparts = M;
assert(nparts >= DIFF_BY);
nchoosek(nparts, nparts-DIFF_BY, combinations);
cout << "For loop begins " << get_current_time_str() << endl;
vector<pair<uint128_t, uint>> hash_array;
//hash_array.resize(dummy_nodes.size);
cout << combinations.size() << " combination(s)" << endl;
long long debug_clique_size_sum = 0;
long long debug_diff_num_sum = 0;
// mark the process nodes
vector<bool> is_merged(dummy_nodes.size, false);
vector<uint> node_ids(dummy_nodes.size,0);
for (auto k = 0; k < combinations.size(); k++) {
//if (num_codes >= 1000000000) cout << k << " " ;
gettimeofday(&beg, NULL);
long long num_active_codes = 0;
// get active code ids
for (long long l = 0; l < dummy_nodes.size; l ++) {
if (!is_merged[l]) {
node_ids[num_active_codes++] = l;
}
}
hash_array.resize(num_active_codes);
cout << k << " th combination with nodes number of " << num_active_codes << endl ;
#pragma omp parallel for
for (long long l = 0; l < num_active_codes; l ++) {
uint128_t hash = 0x0000000000000000ULL;
uint code_id = dummy_nodes.ids[node_ids[l]];
for (auto it = combinations[k].begin(); it != combinations[k].end();
it++) {
//hash |= (static_cast<uint128_t>(codes[GetIndex(M, code_id, *it)]) << (LOG_K * (*it)));
long start = (*it)*(M/nparts);
long end = (*it+1)*(M/nparts);
for (auto iter = start; iter < end; iter ++) {
int cid;
if (K > 256) {
cid = ((uint16_t*)codes)[GetIndex(M, code_id, iter)];
} else {
cid = codes[GetIndex(M, code_id, iter)];
}
hash |= (static_cast<uint128_t>(cid) << (LOG_K * (iter)));
}
}
//exit(0);
hash_array[l] = make_pair(hash, node_ids[l]);// put index of dummy_nodes here
// as push_back() in DummyNode required
}
//gettimeofday(&mid, NULL);
//cout << " calculate hash codes " <<mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//gettimeofday(&beg, NULL);
// sort the hash codes
// Explicitly force a call to parallel sort.
//cout << "hash array size " << hash_array.size() << endl;
__gnu_parallel::sort(hash_array.begin(), hash_array.end(),
[](const pair<uint128_t, uint32_t>& a,
const pair<uint128_t, uint32_t>&b) -> bool {
return a.first < b.first;
});
//gettimeofday(&mid, NULL);
//cout << " sort codes " << mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//gettimeofday(&beg, NULL);
// traverse hash array
for (uint i = 0; i < hash_array.size(); i ++) {
uint end = i+1;
for (; end < hash_array.size(); end++) {
if (hash_array[end].first != hash_array[i].first)
break;
}
if (end == i+1) {
continue;
}
debug_clique_size_sum += end-i;
// process the clique
// Use the first node as the parent
uint parent_node_id = hash_array[i].second;
uint parent_code_id = dummy_nodes.ids[parent_node_id];
for (long long j = i+1; j < end; j ++) {
uint code_id = dummy_nodes.ids[hash_array[j].second];
if ((int)(heights[code_id])+1 > (int)(heights[parent_code_id])) {
heights[parent_code_id] = (int)(heights[code_id])+1;
}
}
//
if ((int)(heights[parent_code_id]) >= MAX_HEIGHT - 2) {
finalists.push_back(parent_code_id);
is_merged[parent_node_id] = true;
}
root_id = dummy_nodes.ids[parent_node_id];
for (long long j = i+1; j < end; j ++) {
uint node_id = hash_array[j].second;
uint code_id = dummy_nodes.ids[node_id];
is_merged[node_id] = true;
// create the edges
edges.emplace_back(parent_code_id, code_id);
// cout << edges.size() << ". <" << parent_id << ", " << node_id << ">: ";
for (int m = 0; m < M; m ++) {
uint to = codes[GetIndex(M, code_id, m)];
uint from = codes[GetIndex(M, parent_code_id, m)];
if (from != to)
{
debug_diff_num_sum++;
}
}
// cout << endl;
}
i = end - 1;
}
//cout << "sum of clique size is " << debug_clique_size_sum << endl;
//gettimeofday(&mid, NULL);
//cout << " combination used " <<mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//double vm, rss;
//process_mem_usage(vm, rss);
//cout << " partition linear opt approx VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
}
// put nodes in "next"
for (uint i = 0; i < dummy_nodes.size; i ++) {
if (is_merged[i]) continue;
uint code_id = dummy_nodes.ids[i];
next.push_back(code_id);
}
cout << "number of edges "<< edges.size() << endl;
cout << "number of diffs "<< debug_diff_num_sum << endl;
global_diff_sum += debug_diff_num_sum;
cout << "GLOBAL number of diffs "<< global_diff_sum << endl;
double vm, rss;
process_mem_usage(vm, rss);
cout << "find edge VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
gettimeofday(&all_en, NULL);
cout << " Find Edge uses: "
<< all_en.tv_sec - all_st.tv_sec + (all_en.tv_usec-all_st.tv_usec)/1e6
<< "sec" <<endl;
if (DIFF_BY == M) {
cout << " TOTAL number of Diffs " << global_diff_sum << endl;
}
}
/*
void partition_linear_opt_approx_clique_with_constraint_on_size(
const uchar* codes,
vector<uchar>& dummycodes, vector<bool>& dummymarks,
int M, int K, int nparts, uint num_codes,
uint& global_num_dummy, int DIFF_BY, uint* parents,
uint* rank, vector<pair<uint, uint>>& edges,
DummyNodes& dummy_nodes, DummyNodes& next,uint& root_id,
vector<uchar>& heights, int MAX_HEIGHT,
int min_clique_size, long& last_vec_id)
{
int LOG_K = round(log2(K));
cout << "number of edges "<< edges.size() << endl;
timeval beg, mid, mid1, end, all_st, all_en;
gettimeofday(&all_st, NULL);
cout << "Find diff = " << DIFF_BY << endl;
vector<vector<int>> combinations;
cout << "nparts = " << nparts << " DIFF_BY = " << DIFF_BY << endl;
assert(nparts >= DIFF_BY);
nchoosek(nparts, nparts-DIFF_BY, combinations);
cout << "For loop begins " << get_current_time_str() << endl;
vector<pair<uint128_t, uint>> hash_array;
//hash_array.resize(dummy_nodes.size);
cout << combinations.size() << " combination(s)" << endl;
long long debug_clique_size_sum = 0;
long long debug_diff_num_sum = 0;
// mark the process nodes
vector<bool> is_merged(num_codes*2);
for (long i = 0; i < num_codes*2; i ++) is_merged[i] = false;
long clique_idx = 0;
vector<uint> clique_sizes(0);
vector<long> clique_ids(num_codes, -1);
for (auto k = 0; k < combinations.size(); k++) {
hash_array.resize(0);
gettimeofday(&beg, NULL);
for (long long l = 0; l < dummy_nodes.size; l ++) {
int sp_count = 0;
uint128_t hash = 0x0000000000000000ULL;
uint node_id = dummy_nodes.ids[l];
if (is_merged[node_id] == true) continue;
for (auto it = combinations[k].begin(); it != combinations[k].end();
it++) {
//hash |= (static_cast<uint128_t>(codes[GetIndex(M, node_id, *it)]) << (LOG_K * (*it)));
if (nparts <= M) {
long start = (*it)*(M/nparts);
long end = (*it+1)*(M/nparts);
for (auto iter = start; iter < end; iter ++)
hash |= (static_cast<uint128_t>(codes[GetIndex(M, node_id, iter)]) << (LOG_K * (iter)));
} else {
int num_part = nparts/M;
int m = (*it) / num_part;
int part = (*it) % num_part;
int val = (uchar)(codes[GetIndex(M, node_id, m)]);
int val_length = LOG_K / num_part;
val = (val >> (val_length*part)) % ((1<<val_length)-1);
//cout << (*it) << endl;
//cout << bitset<8>(val) << endl;
hash |= (static_cast<uint128_t>(val) << (val_length* (*it)));
//cout << bitset<64>((uint64_t)hash) << endl;
}
}
//exit(0);
hash_array.emplace_back(hash, l);// put index of dummy_nodes here
// as push_back() in DummyNode required
}
//gettimeofday(&mid, NULL);
// sort the hash codes
// Explicitly force a call to parallel sort.
__gnu_parallel::sort(hash_array.begin(), hash_array.end(),
[](const pair<uint128_t, uint32_t>& a,
const pair<uint128_t, uint32_t>&b) -> bool {
return a.first < b.first;
});
//gettimeofday(&mid, NULL);
//cout << " sort codes " << mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//gettimeofday(&beg, NULL);
// traverse hash array
for (uint i = 0; i < hash_array.size(); i ++) {
uint end = i+1;
for (; end < hash_array.size(); end++) {
if (hash_array[end].first != hash_array[i].first)
break;
}
//if (end == i+1) {
if (end <= i + min_clique_size) {
continue;
}
debug_clique_size_sum += end - i;
uint clique_size = end - i;
uint cliq_id = clique_idx ++;
clique_sizes.emplace_back(clique_size);
for (uint iter = i; iter < end; iter ++) {
uint vec_id = dummy_nodes.ids[hash_array[iter].second];
if (clique_ids[vec_id] == -1) clique_ids[vec_id] = cliq_id;
uint vcid = clique_ids[vec_id];
if (clique_sizes[vcid] < clique_size)
clique_ids[vec_id] = cliq_id;
}
i = end - 1;
}
// cout << "sum of clique size is " << debug_clique_size_sum << endl;
//gettimeofday(&mid, NULL);
//cout << " combination used " <<mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
//double vm, rss;
//process_mem_usage(vm, rss);
//cout << " partition linear opt approx VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
}
// put nodes in "next"
// generate candidate cliques and merge cliques
vector<vector<uint>> cliques;
cliques.resize(clique_idx);
for (uint i = 0; i < dummy_nodes.size; i ++) {
uint vec_id = dummy_nodes.ids[i];
uint cliq_id = clique_ids[vec_id];
if (cliq_id == (uint)0-1) continue;
cliques[cliq_id].emplace_back(vec_id);
}
for (uint i = 0; i < cliques.size(); i ++) {
if (cliques[i].size() >= min_clique_size) {
// INFO
if (cliques[i].size() < info_max_size)
info_clique_sizes[cliques[i].size()]++;
// END OF INFO
for (uint vec_id : cliques[i]) {
if (last_vec_id == -1) {
// this is the first node in the first clique ever
last_vec_id = vec_id;
root_id = vec_id;
} else {
// create an edge with last_vec_id for each vector
edges.emplace_back(last_vec_id, vec_id);
for (int m = 0; m < M; m ++) {
uint to = codes[GetIndex(M, last_vec_id, m)];
uint from = codes[GetIndex(M, vec_id, m)];
if (from != to)
{
debug_diff_num_sum++;
}
}
}
is_merged[vec_id] = true;
}
last_vec_id = cliques[i][cliques[i].size()-1];
}
}
for (uint i = 0; i < dummy_nodes.size; i ++) {
uint node_id = dummy_nodes.ids[i];
if (is_merged[node_id]) continue;
next.push_back(node_id);
}
cout << "number of edges "<< edges.size() << endl;
cout << "number of diffs "<< debug_diff_num_sum << endl;
global_diff_sum += debug_diff_num_sum;
cout << "GLOBAL number of diffs "<< global_diff_sum << endl;
double vm, rss;
process_mem_usage(vm, rss);
cout << "find edge VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
gettimeofday(&all_en, NULL);
cout << " Find Edge uses: "
<< all_en.tv_sec - all_st.tv_sec + (all_en.tv_usec-all_st.tv_usec)/1e6
<< "sec" <<endl;
}
*/
// true: read from file
// false: newly created
bool create_approx_tree(const string &dataset_path, const uchar* codes,
vector<uchar>& dummycodes, vector<bool>& dummymarks,
int M, int K, uint num_codes, int diff_argument,
QNode* nodes, float** m_sub_distances_,
float** dist_tables, int max_height_folds,
int method = 1)
{
cout << "create_tree_index_approx() in create_tree_approx.h called " << endl;
m_sub_distances = m_sub_distances_;
mkk_tables = dist_tables;
pqcodes = codes;
if (num_codes >= INT_MAX) {
cout << "Number of codes is too large: " << num_codes << endl;
cout << "Exit" << endl;
exit(0);
}
if (codes == NULL) {
cout << "Please generate compressed codes first" << endl;
exit(0);
}
// transform codes into strips
const uchar* transformed_codes = codes;
uint num_dummies = 0;
// create_part_tree_index_approx(dataset_path, transformed_codes, dummycodes,
// dummymarks, M, K, num_codes,
// num_dummies, diff_argument,
// nodes, dist_tables);
{
cout << "Build trees by diffs " << endl;
timeval beg, mid, mid1, end;
gettimeofday(&beg, NULL);
vector<pair<uint, uint>> edges;
uint root_id;
//find_edges_by_diff(codes, M, K, num_codes, NUM_DIFF, edges);
find_edges_by_diff_approx(dataset_path, codes, dummycodes, dummymarks,
M, K, num_codes, num_dummies,
diff_argument, edges, dist_tables,
root_id, max_height_folds, method);
cout << "found " << edges.size() << " edges" << endl;
gettimeofday(&mid, NULL);
cout << " ++++find edges by diff in "
<<mid.tv_sec - beg.tv_sec + (mid.tv_usec - beg.tv_usec) / 1e6 << endl;
long long n_diffs = check_num_diffs(codes, M, K, num_codes, edges);
cout << dataset_path << endl;
if (nodes == NULL) {
cout << "QNodes were NULL" << endl;
nodes = new QNode[num_codes+1];
}
edges_to_tree_index_approx_dfs_layout(dataset_path, codes, dummycodes,
dummymarks, M, K, num_codes,
num_dummies, edges, nodes, root_id,
method);
cout << "--------------Qnodes is " << nodes << endl;
cout << "Building trees done" << endl;
double vm, rss;
process_mem_usage(vm, rss);
cout << "Build tree VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
// convert qnodes to compressed codes
// create compressed codes
// scan the qnodes array and fill out an array
// output number of codes as well
cout << "--------------Qnodes is " << nodes << endl;
//qnodes_to_compressed_codes(dataset_path, codes,
if (method < 4) {
// DTC method in paper
qnodes_to_compressed_codes_opt(dataset_path, codes,
M, K, num_codes, diff_argument, n_diffs,
nodes, m_sub_distances_, dist_tables, method);
} else {
// method = 4, block based design
qnodes_to_compressed_codes_opt_block_aware(dataset_path, codes,
M, K, num_codes, diff_argument, n_diffs,
nodes, m_sub_distances_, dist_tables);
}
// row store
//row_store_qnodes_to_compressed_codes_opt(dataset_path, codes,
// M, K, num_codes, diff_argument, n_diffs,
// nodes, m_sub_distances_, dist_tables, method);
}
uint num_nodes = num_codes;
//delete[] transformed_codes;
return false;
}
// Converts an edge list (parent, child) into a CSR-style adjacency list.
// On return sparse_row holds all child ids grouped by parent, and
// offsets[p]..offsets[p+1] delimits the children of parent p
// (offsets ends up with num_codes+1 entries).
// NOTE: edges is sorted in place by parent id.
void edges_to_adj_lists_approx(const int num_codes, vector<pair<uint, uint>> &edges, vector<uint> &sparse_row, vector<uint> &offsets)
{
  cout << "Total number of edges " << edges.size() << endl;
  // group edges by parent; the comparator looks at the parent id only
  sort(edges.begin(), edges.end(), [](const pair<uint, uint>& a, const pair<uint, uint>& b) {
    return a.first < b.first;
  });
  sparse_row.clear(); // contains all the child ("to") ids
  offsets.clear();    // indexed by parent id, contains starting points
  sparse_row.reserve(edges.size());
  offsets.reserve(num_codes + 1);
  vector<uint> num_neighbors(num_codes, 0);
  // size_t index: avoids the signed/unsigned comparison (and potential
  // overflow) of the previous `int` counter against edges.size()
  for (size_t i = 0; i < edges.size(); i++)
  {
    uint parent = edges[i].first;
    uint child = edges[i].second;
    num_neighbors[parent]++;
    sparse_row.push_back(child);
  }
  // prefix-sum the per-parent counts into offsets
  uint idx = 0;
  offsets.push_back(idx);
  for (int i = 0; i < num_codes; i++)
  {
    idx += num_neighbors[i];
    offsets.push_back(idx);
  }
}
// Recursively walks the subtree rooted at vec_id (children given by the
// CSR pair sparse_row/offsets) and raises max_dist to the largest
// table-based distance from `source` to any descendant.
// NOTE(review): recursion depth equals the tree height — assumed bounded.
void dfs_find_max_dist(const uchar* codes, vector<uint>& sparse_row, vector<uint>& offsets,
                       uint source, uint vec_id, float& max_dist, int K) {
  uint start = offsets[vec_id], end = offsets[vec_id+1];
  for (uint it = start; it < end; it ++) {
    uint child_vid = sparse_row[it];
    // distance from the DFS source to this child, via precomputed tables
    // (previously child_vid was computed but sparse_row[it] was re-read)
    float dist = cal_distance_by_tables(source, child_vid, mkk_tables,
                                        codes, K);
    if (dist > max_dist) max_dist = dist;
    dfs_find_max_dist(codes, sparse_row, offsets, source, child_vid, max_dist, K);
  }
}
//void dfs_wnode_layout_only(uint& node_id, uint parent_vid,
// vector<uint>& sparse_row, vector<uint>& offsets,
// int M) {
// uint start = offsets[parent_vid], end = offsets[parent_vid+1];
// for (uint it = start; it < end; it ++) {
// node_id = node_id + 1;
// uint child_id = sparse_row[it];
// for (int i = 0; i < M; i ++) {
// wnodes[node_id].code[i] = pqcodes[GetIndex(M, child_id, i)];
// }
// dfs_wnode_layout_only(node_id, child_id, sparse_row, offsets,M);
// }
//}
//void dfs_cnode_layout_only(uint& node_id, uint parent_vid,
// vector<uint>& sparse_row, vector<uint>& offsets,
// uchar depth, int M) {
// uint start = offsets[parent_vid], end = offsets[parent_vid+1];
// depth = depth + 1;
// for (uint it = start; it < end; it ++) {
// node_id = node_id + 1;
// uint child_id = sparse_row[it];
// cnodes[node_id].depth = depth;
// uchar num_diff = 0;
// for (int m = 0; m < M; m ++) {
// uint from = pqcodes[GetIndex(M, parent_vid, m)];
// uint to = pqcodes[GetIndex(M, child_id, m)];
// if (from != to) {
// cnodes[node_id].diffs[num_diff].m = m;
// cnodes[node_id].diffs[num_diff].from = from;
// cnodes[node_id].diffs[num_diff].to = to;
// num_diff ++;
// }
// }
// cnodes[node_id].diff_num = num_diff;
// dfs_cnode_layout_only(node_id, child_id, sparse_row, offsets,depth,M);
// }
//}
// Lays the subtree under parent_vid out in DFS (pre-order) positions of the
// QNode array. node_id is the position of the current parent on entry and
// the position of the last node written in its subtree on return. Each child
// stores its depth and the list of sub-code diffs against its parent.
void dfs_node_layout(QNode* nodes, uint& node_id, uint parent_vid,
                     vector<uint>& sparse_row, vector<uint>& offsets,
                     uchar depth, int M) {
  const uint self_pos = node_id;
  nodes[self_pos].child_pos_start = self_pos + 1;
  const uchar child_depth = depth + 1;
  for (uint idx = offsets[parent_vid]; idx < offsets[parent_vid + 1]; idx++) {
    const uint child_vid = sparse_row[idx];
    ++node_id;
    nodes[node_id].set_id_parent_pos(child_vid, self_pos);
    nodes[node_id].depth = child_depth;
    // record only the sub-codes where child and parent differ
    uchar ndiff = 0;
    for (int m = 0; m < M; m++) {
      const uint pcode = pqcodes[GetIndex(M, parent_vid, m)];
      const uint ccode = pqcodes[GetIndex(M, child_vid, m)];
      if (pcode != ccode) {
        nodes[node_id].diffs[ndiff].m = m;
        nodes[node_id].diffs[ndiff].from = pcode;
        nodes[node_id].diffs[ndiff].to = ccode;
        ndiff++;
      }
    }
    nodes[node_id].diff_num = ndiff;
    dfs_node_layout(nodes, node_id, child_vid, sparse_row, offsets, child_depth, M);
  }
  // everything written after self_pos belongs to this subtree
  nodes[self_pos].child_num = node_id - self_pos;
}
//void read_tree_index_file_approx(const string &file_name,
// QNode* nodes)
//{
// ifstream ifs(file_name, ios::binary);
// if (!ifs.is_open()) {
// cerr << "Error: cannot open " << file_name << ends;
// assert(0);
// }
//
// uint num_nodes = 0;
// ifs.read(reinterpret_cast<char*> (&num_nodes), sizeof(uint));
//
// ifs.read((char*)&(nodes[0]),sizeof(QNode)*(num_nodes+1));
//
// ifs.close();
//
// double vm, rss;
// process_mem_usage(vm, rss);
// cout << "read file VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
//}
// Finds the parent->child edges of the approximate tree by repeatedly
// grouping codes whose PQ codes agree on all but `diff` sub-codes
// (diff = 0..diff_argument), then hanging any leftover "finalist" roots
// under a single global root. Results (root_id + edges) are cached on disk
// under a name derived from the build parameters.
void find_edges_by_diff_approx(const string &dataset_path, const uchar* codes,
                               vector<uchar>& dummycodes,
                               vector<bool>& dummymarks, int M, int K,
                               uint num_codes, uint& global_num_dummy,
                               int diff_argument, vector<pair<uint, uint>>& edges,
                               float** dist_tables, uint& root_id,
                               int max_height_folds, int method)
{
  // cache file name encodes M (via PQ_M), K, the height cap, method and N
  string file_name = dataset_path + "/M" + to_string(PQ_M) + "K" + to_string(K)
                     + "H" + to_string(max_height_folds)
                     + "_Approx_Edges";
  if (with_id) file_name = file_name + "_with_id";
  switch (method) {
    case 1:
      break;
    case 2:
      // variant without the height constraint
      file_name = file_name + "_WOH";
      break;
    case 3:
      file_name = file_name + "_clique";
      break;
  }
  file_name = file_name + "_N" + to_string(num_codes);
  if (exists_test3(file_name)) {
    ifstream ifs(file_name, ios::binary);
    if (!ifs.is_open()) {
      cerr << "Error: cannot open " << file_name << ends;
      assert(0);
    }
    // load tree from file
    // NOTE(review): assumes exactly num_codes-1 edges were stored; confirm
    // this invariant holds for every method before trusting the cache.
    ifs.read(reinterpret_cast<char*> (&root_id), sizeof(uint));
    edges.resize(num_codes-1);
    ifs.read( (char*) &(edges[0]), sizeof(pair<uint, uint>) * edges.size());
    cout << "Edges are read from file " << file_name << endl;
    return ;
  }
  cout << "Find_edges start ------------ " << get_current_time_str() << endl;
  // parents/rank stay NULL for the current methods; deleted below as no-ops
  uint* parents=NULL;
  uint* rank=NULL;
  uint max_num_nodes = num_codes*2;  // NOTE(review): written but never read
  uint globa_num_dummy = 0;  // NOTE(review): never read — typo of global_num_dummy?
  // prepare dummy nodes: nodes_0/nodes_1 act as ping-pong buffers per round
  DummyNodes nodes_0;
  nodes_0.init_nodes(codes, num_codes, M);
  DummyNodes nodes_1;
  nodes_1.init(M);
  DummyNodes& dummy_nodes = nodes_0;
  DummyNodes& next = nodes_1;
  DummyNodes finalists;  // subtree roots that reached the height cap
  vector<uchar> heights(num_codes, 0);
  vector<bool> is_connected(num_codes, 0);
  is_active = vector<bool>(num_codes, true);
  int MAX_HEIGHTS = M * max_height_folds;
  // each round merges nodes differing in exactly `diff` sub-codes;
  // unmerged survivors are carried into the next round via `next`
  for (int diff = 0; diff <= diff_argument; diff ++) {
    //partition_linear_opt_approx_with_constraint_bitset(codes, dummycodes,
    //                              dummymarks, M, K, num_codes, global_num_dummy,
    //                              diff, parents, rank, edges, is_connected, root_id,
    //                              heights, MAX_HEIGHTS);
    switch (method) {
      case 1:
        partition_linear_opt_approx_with_constraint(codes, dummycodes,
                                      dummymarks, M, K, num_codes, global_num_dummy,
                                      diff, parents, rank, edges, dummy_nodes, next,finalists,
                                      root_id, heights, MAX_HEIGHTS);
        break;
      case 2:
        partition_linear_opt_approx_with_constraint_WOH(codes, dummycodes,
                                      dummymarks, M, K, num_codes, global_num_dummy,
                                      diff, parents, rank, edges, dummy_nodes, next,finalists,
                                      root_id, heights, MAX_HEIGHTS);
        break;
    }
    // NOTE(review): this does NOT swap the two buffers. `tmp` is a reference
    // aliasing dummy_nodes, so both statements are copy-assignments; after
    // them both buffers hold `next`'s content and `next` is then cleared.
    // The net effect matches swap-then-clear only because the old
    // dummy_nodes content is discarded anyway — verify DummyNodes copy
    // semantics (deep vs shallow) before touching this; std::swap on the
    // underlying nodes_0/nodes_1 would express the intent directly.
    DummyNodes& tmp = dummy_nodes;
    dummy_nodes = next;
    next = tmp;
    next.clear();
    // the termination condition
    if (dummy_nodes.size <= 1) break;
    cout << "dummy_nodes.size = " << dummy_nodes.size << endl;
  }
  cout << "root id after enumerating combinations " << root_id << endl;
  if (dummy_nodes.size > 0) {
    finalists.push_back(dummy_nodes.ids[0]); // there will only be 1 node left
  }
  // handle the roots with height of MAX_HEIGHT - 2:
  // the first finalist becomes the global root; all others hang under it
  cout << "finalists size is " << finalists.size << " edges size " << edges.size() << endl;
  if (finalists.size > 0) {
    uint parent_code_id = finalists.ids[0];
    root_id = parent_code_id;
    for (long long i = 1; i < finalists.size; i ++) {
      uint code_id = finalists.ids[i];
      // create the edges
      edges.emplace_back(parent_code_id, code_id);
      // count the sub-code differences contributed by this edge
      for (int m = 0; m < M; m ++) {
        uint to = codes[GetIndex(M, code_id, m)];
        uint from = codes[GetIndex(M, parent_code_id, m)];
        if (from != to)
        {
          global_diff_sum++;
        }
      }
    }
  }
  cout << "number of edges is " << edges.size() << endl;
  cout << " ++++ TOTAL number of Diffs " << global_diff_sum << endl;
  // both pointers are still NULL here; delete[] NULL is a no-op
  delete[] parents;
  delete[] rank;
  // write edges to file
  ofstream ofs(file_name, ios::binary);
  if (!ofs.is_open()) {
    cerr << "Error: cannot open " << file_name << ends;
    assert(0);
  }
  cout << file_name << " " << " opened" << endl;
  ofs.write(reinterpret_cast<char*> (&root_id), sizeof(uint));
  ofs.write((char*) &(edges[0]), sizeof(pair<uint, uint>) * edges.size());
  ofs.close();
  cout << file_name << " " << " written and closed" << endl;
  cout << "Find_edges end ------------- " << get_current_time_str() << endl;
}
// Converts the edge list into a DFS-laid-out QNode array (`nodes`):
// computes per-node max subtree distances, sorts each node's children with
// the farthest subtree first, lays the tree out with dfs_node_layout(), and
// caches the nodes on disk. Loads from the cache file when it exists.
// Fixes: the parents/max_dists/max_dist2p/info_depth_counts arrays were
// previously leaked, and the final diff count printed an always-zero local.
void edges_to_tree_index_approx_dfs_layout(const string &dataset_path,
                      const uchar* codes, vector<uchar>& dummycodes,
                      vector<bool>& dummymarks, int M, int K,
                      uint num_codes, uint global_num_dummy,
                      vector<pair<uint,uint>>& edges,
                      QNode* nodes, uint& root_id, int method)
{
  cout << "Edges to tree index..." << endl;
  // check if nodes have been stored
  string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
                     + "_Approx_TreeNodesDFS";
  if (with_id) file_name = file_name + "_with_id";
  switch (method) {
    case 1:
      break;
    case 2:
      file_name = file_name + "_WOH";
      break;
    case 3:
      file_name = file_name + "_clique";
      break;
  }
  file_name = file_name + "_N" + to_string(num_codes);
  if (exists_test3(file_name)) {
    ifstream ifs(file_name, ios::binary);
    if (!ifs.is_open()) {
      cerr << "Error: cannot open " << file_name << ends;
      assert(0);
    }
    // load tree from file
    ifs.read( (char*) &(nodes[0]), sizeof(QNode) * (num_codes+1));
    cout << "Nodes are read from file " << file_name << endl;
    return ;
  }
  cout << "Creating tree index..." << endl;
  vector<uint> sparse_row;
  vector<uint> offsets;
  // parent vector id per node; (uint)0-1 marks "no parent"
  uint* parents = new uint[num_codes];
  for (uint i = 0; i < num_codes; i ++) parents[i] = (uint)0-1;
  for (auto edge : edges) {
    parents[edge.second] = edge.first;
  }
  edges_to_adj_lists_approx(num_codes, edges, sparse_row, offsets);
  edges.resize(0);
  // max_dists: farthest descendant of each node;
  // max_dist2p: farthest descendant reached through each direct child
  float* max_dists = new float[num_codes];
  memset(max_dists, 0, sizeof(float)*num_codes);
  float* max_dist2p = new float[num_codes];
  memset(max_dist2p, 0, sizeof(float)*num_codes);
  if (m_sub_distances == NULL) {
    cout << "In function edges_to_tree_index_approx() : " << endl
         << "  m_sub_distances is NULL" << endl;
    exit(0);
  }
  cout << " num_codes = " << num_codes << endl;
  cout << "root id is " << root_id << endl;
  // walk up from each node, updating the ancestors' max distances;
  // the upward walk is capped at 16 levels
  for (uint vid = 0; vid < num_codes; vid ++) {
    uint parent = parents[vid];
    uint prev_parent = vid;
    int depth = 0;
    while (parent != (uint)0-1) {
      if (depth++ >= 16) {
        cout << endl;
        break;
      }
      float dist = cal_distance_by_tables(vid, parent,
                                          mkk_tables, codes, K);
      if (dist > max_dists[parent]) {
        max_dists[parent] = dist;
      }
      if (dist > max_dist2p[prev_parent])
        max_dist2p[prev_parent] = dist;
      prev_parent = parent;
      parent = parents[parent];
    }
  }
  delete[] parents;  // fixes a leak: not needed past this point
  cout << "max dist to ancestors done" << endl;
  // sort the children of each node, farthest subtree first
  for (uint vid = 0; vid < num_codes; vid ++) {
    uint start = offsets[vid], end = offsets[vid+1];
    sort(sparse_row.begin()+start, sparse_row.begin()+end, [max_dist2p](const uint a, const uint b) {
      return max_dist2p[a] > max_dist2p[b];
    });
  }
  // build the tree again with the new order; reset the QNode array
  memset(nodes, 0, sizeof(QNode)*(num_codes+1));
  for (uint i = 0; i < num_codes + 1; i ++) {
    nodes[i].sub_tree_size = 1;
  }
  cout << "Root id is " << root_id << endl;
  // the root stores its full code as M "diffs" from nothing (from = -1)
  for (int m = 0; m < M; m ++) {
    nodes[0].diffs[m].m = m;
    nodes[0].diffs[m].from = -1;
    nodes[0].diffs[m].to = codes[GetIndex(M, root_id, m)];
  }
  nodes[0].vec_id = root_id;
  nodes[0].diff_num = M;
  nodes[0].parent_pos = -1;
  nodes[0].depth = 0;
  uint node_id = 0;
  dfs_node_layout(nodes, node_id, root_id, sparse_row, offsets, nodes[0].depth, M);
  cout << "Root info " << nodes[0].child_pos_start << " "
       << nodes[0].child_num << endl;
  long long debug_n_diffs = 0;
  for (uint pos = 0; pos < num_codes; pos ++) {
    nodes[pos].max_dist = sqrt(max_dists[nodes[pos].vec_id]);
    nodes[pos].max_dist2p = sqrt(max_dist2p[nodes[pos].vec_id]);
    if (pos > 0) debug_n_diffs += nodes[pos].diff_num;
  }
  delete[] max_dists;   // fixes a leak
  delete[] max_dist2p;  // fixes a leak
  cout << "debug n diffs is " << debug_n_diffs << endl;
  // INFO get the histogram of depths distribution
  // NOTE(review): indexes by node depth but only M+2 slots are allocated —
  // confirm depth can never exceed M+1 here.
  uint* info_depth_counts = new uint[M+2];
  memset(info_depth_counts, 0, sizeof(uint)*(M+2));
  for (uint i = 0; i < num_codes; i ++) {
    info_depth_counts[nodes[i].depth] ++;
  }
  for (int i = 0; i < M+2; i ++) {
    cout << info_depth_counts[i] << " nodes at depth " << i << endl;
  }
  delete[] info_depth_counts;  // fixes a leak
  cout << "The sub tree size of root is " << nodes[0].sub_tree_size << endl;
  cout << "The Max dist of root is " << nodes[0].max_dist<< endl;
  // previously printed a local that was never updated (always 0);
  // report the actual counted total instead
  cout << "TOTAL NUMBER OF DIFFS is " << debug_n_diffs << endl;
  cout <<"Build trees end -----------"<<get_current_time_str()<< endl;
  // write nodes to file
  ofstream ofs(file_name, ios::binary);
  if (!ofs.is_open()) {
    cerr << "Error: cannot open " << file_name << ends;
    assert(0);
  }
  cout << file_name << " " << " opened" << endl;
  ofs.write((char*) &(nodes[0]), sizeof(QNode) * (num_codes+1));
  ofs.close();
  cout << "---------------Qnodes is " << nodes << endl;
}
// Sets (ORs) the bit at `offset` (LSB-first within each byte) when the low
// bit of `val` is 1; val == 0 leaves the byte unchanged. `val` is masked to
// its low bit so stray high bits can no longer corrupt neighboring bits
// (all callers in this file pass 0 or 1, so behavior is unchanged for them).
void set_bit(uchar* bytes, long long offset, int val) {
  long long byte_offset = offset / 8;
  int bit_offset = offset % 8;
  bytes[byte_offset] |= (val & 1) << bit_offset;
}
// Returns the bit (0 or 1) stored at `offset`, LSB-first within each byte.
inline int get_bit(uchar* bytes, long long offset) {
  const long long byte_idx = offset / 8;
  const int bit_idx = (int)(offset % 8);
  return 1 & (bytes[byte_idx] >> bit_idx);
}
bool qnodes_to_compressed_codes(const string &dataset_path, const uchar* codes,
int M, int K, uint& num_codes, int diff_argument,
long long n_diffs,
QNode* nodes, float** m_sub_distances_,
float** dist_tables, int method)
{
string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
+ "_Approx_compressed_codes";
if (with_id) file_name = file_name + "_with_id";
switch (method) {
case 1:
break;
case 2:
file_name = file_name + "_WOH";
break;
case 3:
file_name = file_name + "_clique";
break;
}
file_name = file_name + "_N" + to_string(num_codes);
if (exists_test3(file_name)) {
cout << "REMINDER: DPNode file exists. No need to rebuild!" << endl
<< " " << file_name << endl;
return true;
}
cout << "qnodes_to_compressed_codes() in create_tree_approx.h called " << endl;
pqcodes = codes;
if (num_codes >= INT_MAX) {
cout << "Number of codes is too large: " << num_codes << endl;
cout << "Exit" << endl;
exit(0);
}
// Get QNodes first
read_qnodes_from_file(dataset_path, codes, M, K, num_codes,
diff_argument, nodes, method);
long long n_bits = n_diffs * 8 + 11 * (long long)num_codes + 64;// 64 is the root code
long long n_bytes = n_bits/8;
if (n_bits % 8 != 0) n_bytes ++;
uchar* compressed_codes = new uchar[n_bytes];
memset(compressed_codes, 0, sizeof(uchar)*n_bytes);
cout << "number of bits is " << n_bits << endl;
// traverse the QNodes and set data in bits
long long bit_offset = 0;
int last_depth = -1;
for (uint i = 0; i < num_codes; i ++) {
QNode& node = nodes[i];
int depth = node.depth;
if (depth > last_depth) {
// if (depth - last_depth != 1) {
// cout << "Depth is wrong!!!!!!!!!!" << endl;
// } else {
// }
// cout << "depth increase by " << depth - last_depth << endl;
}
last_depth = depth;
// set depth first
if (depth >= 8) {
cout << "depth is " << depth << endl;
exit(0);
}
for (int j = 0; j < 3; j ++) {
set_bit(compressed_codes, bit_offset++, (depth>>j)&1);
}
// set bit map
uchar bit_map = 0;
for (int j = 0; j < node.diff_num; j ++) {
bit_map = bit_map | (1 << node.diffs[j].m);
}
if (i < 3) cout << "ndiff " << (int)node.diff_num << " " << bitset<8>(bit_map) << endl;
for (int j = 0; j < 8; j ++) { // 8 = log(L)
set_bit(compressed_codes, bit_offset++, (bit_map>>j)&1);
}
// set "to"
for (int j = 0; j < node.diff_num; j ++) { // 8 = log(l)
uchar cid = node.diffs[j].to;
if (i < 3) {
cout <<" to: " << bitset<8>(cid) << " " << endl;;
}
for (int k = 0; k < 8; k ++) {
set_bit(compressed_codes, bit_offset++, (cid>>k)&1);
}
}
}
cout << "bit offset is " << bit_offset << endl;
ofstream ofs(file_name, ios::binary);
if (!ofs.is_open() ) {
cerr<<"Error: cannot open" << file_name << ends;
assert(0);
}
for (int i = 0; i < 20; i ++) {
cout << bitset<8>(compressed_codes[i]) << endl;
}
cout << file_name << " " << " created" << endl;
ofs.write((char*) &(n_bits), sizeof(long long));
ofs.write((char*) &(compressed_codes[0]), sizeof(uchar) * (n_bytes));
ofs.close();
return false;
}
bool row_store_qnodes_to_compressed_codes_opt(const string &dataset_path, const uchar* codes,
int M, int K, uint& num_codes, int diff_argument, long long n_diffs,
QNode *nodes, float** m_sub_distances_,
float** dist_tables, int method)
{
string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
+ "_Approx_compressed_codes_opt";
if (with_id) file_name = file_name + "_with_id";
switch (method) {
case 1:
break;
case 2:
file_name = file_name + "_WOH";
break;
case 3:
file_name = file_name + "_clique";
break;
}
file_name = file_name + "_N" + to_string(num_codes);
file_name = file_name + "_row_store";
if (exists_test3(file_name)) {
cout << "REMINDER: DPNode file exists. No need to rebuild!" << endl
<< " " << file_name << endl;
return true;
}
cout << "row_store_qnodes_to_compressed_codes() in create_tree_approx.h called " << endl;
pqcodes = codes;
if (num_codes >= INT_MAX) {
cout << "Number of codes is too large: " << num_codes << endl;
cout << "Exit" << endl;
exit(0);
}
// Get QNodes first
read_qnodes_from_file(dataset_path, codes, M, K, num_codes,
diff_argument, nodes, method);
long long n_bytes = 8 + n_diffs + (3*((long long)num_codes-1)+1)/2;// 8 is the root code
// row store
if (ext == "fvecs") {
n_bytes += (long long)num_codes * 4 * dim;
} else { // bvecs
n_bytes += (long long)num_codes * dim;
}// end row store
double vm, rss;
process_mem_usage(vm, rss);
cout << "Before allocating compressed codes array VM: " << vm << " KB; RSS: " << rss <<" KB" <<endl;
cout << "number of bytes is " << n_bytes << endl;
uchar* compressed_codes = new uchar[n_bytes];
memset(compressed_codes, 0, sizeof(uchar)*n_bytes);
// traverse the QNodes and set data in bits
long long byte_offset = 0;
for (int m = 0; m < M; m ++) {
compressed_codes[byte_offset++] = nodes[0].diffs[m].to;
}
uint i = 1;
long long debug_n_diffs = 0;
for ( ; i < num_codes - 1; i += 2) {
QNode& node1 = nodes[i];
QNode& node2 = nodes[i+1];
debug_n_diffs += (node1.diff_num + node2.diff_num);
// set depth first
uchar depths = node1.depth;
depths = depths | ((node2.depth) << 4);
if (i < 10) {
cout << "depth1 = " << bitset<8>(node1.depth) << " depth2 = "
<< bitset<8>(node2.depth)
<< " depths = " << bitset<8>(depths) << endl;
}
compressed_codes[byte_offset++] = depths;
// -------- write node1
// set bit map
uchar bit_map = 0;
for (int j = 0; j < node1.diff_num; j ++) {
bit_map = bit_map | (1 << node1.diffs[j].m);
}
compressed_codes[byte_offset++] = bit_map;
// set "to"
for (int j = 0; j < node1.diff_num; j ++) { // 8 = log(l)
uchar cid = node1.diffs[j].to;
compressed_codes[byte_offset++] = cid;
}
// -------- write node2
bit_map = 0;
for (int j = 0; j < node2.diff_num; j ++) {
bit_map = bit_map | (1 << node2.diffs[j].m);
}
compressed_codes[byte_offset++] = bit_map;
// set "to"
for (int j = 0; j < node2.diff_num; j ++) {
uchar cid = node2.diffs[j].to;
compressed_codes[byte_offset++] = cid;
}
// row store
if (ext == "fvecs") {
byte_offset += 4*dim*2;
} else { // bvecs
byte_offset += dim*2;
} // end row store
}
if ( i == num_codes - 1) {
cout << " one code left byte_offset is " << byte_offset << endl;
// a depth takes 8 bit
compressed_codes[byte_offset++] = nodes[i].depth;
uchar bit_map = 0;
for (int j = 0; j < nodes[i].diff_num; j ++) {
bit_map = bit_map | (1 << nodes[i].diffs[j].m);
}
compressed_codes[byte_offset++] = bit_map;
for (int j = 0; j < nodes[i].diff_num; j ++) {
compressed_codes[byte_offset++] = nodes[i].diffs[j].to;
}
debug_n_diffs += nodes[i].diff_num;
}
cout << "debug_n_diffs " << debug_n_diffs << endl;
cout << "byte offset is " << byte_offset << endl;
ofstream ofs(file_name, ios::binary);
if (!ofs.is_open() ) {
cerr<<"Error: cannot open" << file_name << ends;
assert(0);
}
for (int i = 0; i < 20; i ++) {
cout << bitset<8>(compressed_codes[i])
<< " " << (int)compressed_codes[i] << endl;
}
cout << file_name << " " << " created" << endl;
long long n_codes = num_codes;
ofs.write((char*) &(n_codes), sizeof(long long));
ofs.write((char*) &(n_bytes), sizeof(long long));
ofs.write((char*) &(compressed_codes[0]), sizeof(uchar) * (n_bytes));
ofs.close();
return false;
}
bool qnodes_to_compressed_codes_opt(const string &dataset_path, const uchar* codes,
int M, int K, uint& num_codes, int diff_argument, long long n_diffs,
QNode *nodes, float** m_sub_distances_,
float** dist_tables, int method)
{
string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
+ "_Approx_compressed_codes_opt";
if (with_id) file_name = file_name + "_with_id";
switch (method) {
case 1:
break;
case 2:
file_name = file_name + "_WOH";
break;
case 3:
file_name = file_name + "_clique";
break;
}
file_name = file_name + "_N" + to_string(num_codes);
if (exists_test3(file_name)) {
cout << "REMINDER: DPNode file exists. No need to rebuild!" << endl
<< " " << file_name << endl;
return true;
}
cout << "qnodes_to_compressed_codes() in create_tree_approx.h called " << endl;
pqcodes = codes;
if (num_codes >= INT_MAX) {
cout << "Number of codes is too large: " << num_codes << endl;
cout << "Exit" << endl;
exit(0);
}
// Get QNodes first
read_qnodes_from_file(dataset_path, codes, M, K, num_codes,
diff_argument, nodes, method);
long long n_bytes = 8 + n_diffs + (3*((long long)num_codes-1)+1)/2;// 8 is the root code
uchar* compressed_codes = new uchar[n_bytes];
memset(compressed_codes, 0, sizeof(uchar)*n_bytes);
cout << "number of bytes is " << n_bytes << endl;
// traverse the QNodes and set data in bits
long long byte_offset = 0;
for (int m = 0; m < M; m ++) {
compressed_codes[byte_offset++] = nodes[0].diffs[m].to;
}
uint i = 1;
long long debug_n_diffs = 0;
for ( ; i < num_codes - 1; i += 2) {
QNode& node1 = nodes[i];
QNode& node2 = nodes[i+1];
debug_n_diffs += (node1.diff_num + node2.diff_num);
// set depth first
uchar depths = node1.depth;
depths = depths | ((node2.depth) << 4);
if (i < 10) {
cout << "depth1 = " << bitset<8>(node1.depth) << " depth2 = "
<< bitset<8>(node2.depth)
<< " depths = " << bitset<8>(depths) << endl;
}
compressed_codes[byte_offset++] = depths;
// -------- write node1
// set bit map
uchar bit_map = 0;
for (int j = 0; j < node1.diff_num; j ++) {
bit_map = bit_map | (1 << node1.diffs[j].m);
}
compressed_codes[byte_offset++] = bit_map;
// set "to"
for (int j = 0; j < node1.diff_num; j ++) { // 8 = log(l)
uchar cid = node1.diffs[j].to;
compressed_codes[byte_offset++] = cid;
}
// -------- write node2
bit_map = 0;
for (int j = 0; j < node2.diff_num; j ++) {
bit_map = bit_map | (1 << node2.diffs[j].m);
}
compressed_codes[byte_offset++] = bit_map;
// set "to"
for (int j = 0; j < node2.diff_num; j ++) {
uchar cid = node2.diffs[j].to;
compressed_codes[byte_offset++] = cid;
}
}
if ( i == num_codes - 1) {
cout << " one code left byte_offset is " << byte_offset << endl;
// a depth takes 8 bit
compressed_codes[byte_offset++] = nodes[i].depth;
uchar bit_map = 0;
for (int j = 0; j < nodes[i].diff_num; j ++) {
bit_map = bit_map | (1 << nodes[i].diffs[j].m);
}
compressed_codes[byte_offset++] = bit_map;
for (int j = 0; j < nodes[i].diff_num; j ++) {
compressed_codes[byte_offset++] = nodes[i].diffs[j].to;
}
debug_n_diffs += nodes[i].diff_num;
}
cout << "debug_n_diffs " << debug_n_diffs << endl;
cout << "byte offset is " << byte_offset << endl;
ofstream ofs(file_name, ios::binary);
if (!ofs.is_open() ) {
cerr<<"Error: cannot open" << file_name << ends;
assert(0);
}
for (int i = 0; i < 20; i ++) {
cout << bitset<8>(compressed_codes[i])
<< " " << (int)compressed_codes[i] << endl;
}
cout << file_name << " " << " created" << endl;
long long n_codes = num_codes;
ofs.write((char*) &(n_codes), sizeof(long long));
ofs.write((char*) &(n_bytes), sizeof(long long));
ofs.write((char*) &(compressed_codes[0]), sizeof(uchar) * (n_bytes));
ofs.close();
return false;
}
/*
 * Measurement-only variant of qnodes_to_compressed_codes_opt: it simulates a
 * block-aware layout in which a node's header+diffs never straddle a
 * 4096-byte disk block, counts how many blocks the stream would occupy,
 * prints that count, and then calls exit(0).
 * NOTE(review): because of the exit(0) below, the odd-node tail and the
 * (already commented-out) file-writing code are dead, and
 * `compressed_codes` is never freed or written to disk — acceptable only
 * because the process terminates here.
 */
bool qnodes_to_compressed_codes_opt_block_aware(const string &dataset_path, const uchar* codes,
        int M, int K, uint& num_codes, int diff_argument, long long n_diffs,
        QNode *nodes, float** m_sub_distances_,
        float** dist_tables)
{
    cout << "qnodes_to_compressed_codes() in deltapq_create_tree_approx.h called " << endl;
    pqcodes = codes;
    if (num_codes >= INT_MAX) {
        cout << "Number of codes is too large: " << num_codes << endl;
        cout << "Exit" << endl;
        exit(0);
    }
    // Get QNodes first
    read_qnodes_from_file(dataset_path, codes, M, K, num_codes,
            diff_argument, nodes, 1);
    long long n_bytes = 8 + n_diffs + (3*((long long)num_codes-1)+1)/2;// 8 is the root code
    uchar* compressed_codes = new uchar[n_bytes];
    memset(compressed_codes, 0, sizeof(uchar)*n_bytes);
    cout << "number of bytes is " << n_bytes << endl;
    // traverse the QNodes and set data in bits
    long long byte_offset = 0;
    // root code stored verbatim
    for (int m = 0; m < M; m ++) {
        compressed_codes[byte_offset++] = nodes[0].diffs[m].to;
    }
    uint i = 1;
    long long debug_n_diffs = 0;
    // simulated block accounting: bits consumed within the current block
    int block_size_bits = 4096*8;
    long int n_blocks_used = 0;
    int block_offset = 0;
    block_offset += 10; // 10 bits for number of nodes in this block, max value is 4096*8/10
    for ( ; i < num_codes - 1; i += 2) {
        QNode& node1 = nodes[i];
        QNode& node2 = nodes[i+1];
        debug_n_diffs += (node1.diff_num + node2.diff_num);
        // calculate block usage for node1
        int expected_bits = 0;
        expected_bits += 10; // bitmap and two bits
        expected_bits += node1.diff_num * 8;
        // node would cross the block boundary: open a new block (which
        // again starts with a 10-bit count header)
        if (block_offset + expected_bits > block_size_bits) {
            block_offset = 10 + expected_bits;
            n_blocks_used ++;
        } else {
            block_offset += expected_bits;
        }
        // calculate block usage for node2
        expected_bits = 0;
        expected_bits += 10; // bitmap and two bits
        expected_bits += node2.diff_num * 8;
        if (block_offset + expected_bits > block_size_bits) {
            block_offset = 10 + expected_bits;
            n_blocks_used ++;
        } else {
            block_offset += expected_bits;
        }
        // set depth first (two 4-bit depths packed in one byte)
        uchar depths = node1.depth;
        depths = depths | ((node2.depth) << 4);
        if (i < 10) {
            cout << "depth1 = " << bitset<8>(node1.depth) << " depth2 = "
                << bitset<8>(node2.depth)
                << " depths = " << bitset<8>(depths) << endl;
        }
        compressed_codes[byte_offset++] = depths;
        // -------- write node1
        // set bit map
        uchar bit_map = 0;
        for (int j = 0; j < node1.diff_num; j ++) {
            bit_map = bit_map | (1 << node1.diffs[j].m);
        }
        compressed_codes[byte_offset++] = bit_map;
        // set "to"
        for (int j = 0; j < node1.diff_num; j ++) { // 8 = log(l)
            uchar cid = node1.diffs[j].to;
            compressed_codes[byte_offset++] = cid;
        }
        // -------- write node2
        bit_map = 0;
        for (int j = 0; j < node2.diff_num; j ++) {
            bit_map = bit_map | (1 << node2.diffs[j].m);
        }
        compressed_codes[byte_offset++] = bit_map;
        // set "to"
        for (int j = 0; j < node2.diff_num; j ++) {
            uchar cid = node2.diffs[j].to;
            compressed_codes[byte_offset++] = cid;
        }
    }
    cout << "number of blocks used = " << n_blocks_used << endl;
    // NOTE(review): measurement done — everything below is unreachable.
    exit(0);
    if ( i == num_codes - 1) {
        cout << " one code left byte_offset is " << byte_offset << endl;
        // a depth takes 8 bit
        compressed_codes[byte_offset++] = nodes[i].depth;
        uchar bit_map = 0;
        for (int j = 0; j < nodes[i].diff_num; j ++) {
            bit_map = bit_map | (1 << nodes[i].diffs[j].m);
        }
        compressed_codes[byte_offset++] = bit_map;
        for (int j = 0; j < nodes[i].diff_num; j ++) {
            compressed_codes[byte_offset++] = nodes[i].diffs[j].to;
        }
        debug_n_diffs += nodes[i].diff_num;
    }
    cout << "debug_n_diffs " << debug_n_diffs << endl;
    cout << "byte offset is " << byte_offset << endl;
    // ofstream ofs(file_name, ios::binary);
    // if (!ofs.is_open() ) {
    // cerr<<"Error: cannot open" << file_name << ends;
    // assert(0);
    // }
    // for (int i = 0; i < 20; i ++) {
    // cout << bitset<8>(compressed_codes[i])
    // << " " << (int)compressed_codes[i] << endl;
    // }
    // cout << file_name << " " << " created" << endl;
    // long long n_codes = num_codes;
    // ofs.write((char*) &(n_codes), sizeof(long long));
    // ofs.write((char*) &(n_bytes), sizeof(long long));
    // ofs.write((char*) &(compressed_codes[0]), sizeof(uchar) * (n_bytes));
    // ofs.close();
    return false;
}
bool create_diff_index(const string &dataset_path, const uchar* codes,
int M, int K, uint& num_codes)
{
string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
+ "_diff_index";
file_name = file_name + "_N" + to_string(num_codes);
if (exists_test3(file_name)) {
cout << "REMINDER: diff_index file exists. No need to rebuild!" << endl
<< " " << file_name << endl;
return true;
}
cout << "create_diff_index() in create_tree_approx.h called " << endl;
pqcodes = codes;
long long n_diffs = 8;
for (long long i = 1 ; i < num_codes; i ++) {
for (int m = 0; m < M; m ++) {
if (K <= 256) {
if (codes[i*M+m] != codes[(i-1)*M+m]) n_diffs ++;
} else {
if (((uint16_t*)codes)[i*M+m] != ((uint16_t*)codes)[(i-1)*M+m])
n_diffs ++;
}
}
}
cout << "number of diffs is " << n_diffs << endl;
long long n_bytes = 8 + n_diffs + num_codes-1;// 8 is the root code
if (K > 256) n_bytes += n_diffs;
uchar* diff_index = new uchar[n_bytes];
memset(diff_index, 0, sizeof(uchar)*n_bytes);
cout << "number of bytes is " << n_bytes << endl;
// traverse the QNodes and set data in bits
long long byte_offset = 0;
for (int m = 0; m < M; m ++) {
diff_index[byte_offset++] = codes[m];
}
long long i = 1;
long long debug_n_diffs = 0;
for ( ; i < num_codes ;i ++) {
// -------- write node1
// set bit map
uchar bit_map = 0;
for (int m = 0; m < M; m ++) {
if (K <= 256) {
if (codes[i*M+m] != codes[(i-1)*M+m])
bit_map = bit_map | (1 << m);
} else {
if (((uint16_t*)codes)[i*M+m] != ((uint16_t*)codes)[(i-1)*M+m])
bit_map = bit_map | (1 << m);
}
}
diff_index[byte_offset++] = bit_map;
// set "to"
for (int m = 0; m < M; m ++) {
if (K <=256) {
if (codes[i*M+m] != codes[(i-1)*M+m])
diff_index[byte_offset++] = codes[i*M+m];
} else {
if (((uint16_t*)codes)[i*M+m] != ((uint16_t*)codes)[(i-1)*M+m]){
diff_index[byte_offset++] = codes[(i*M+m)*2];
diff_index[byte_offset++] = codes[(i*M+m)*2+1];
}
}
}
}
cout << "byte offset is " << byte_offset << endl;
ofstream ofs(file_name, ios::binary);
if (!ofs.is_open() ) {
cerr<<"Error: cannot open" << file_name << ends;
assert(0);
}
for (int i = 0; i < 20; i ++) {
cout << bitset<8>(diff_index[i])
<< " " << (int)diff_index[i] << endl;
}
cout << file_name << " " << " created" << endl;
long long n_codes = num_codes;
ofs.write((char*) &(n_codes), sizeof(long long));
ofs.write((char*) &(n_bytes), sizeof(long long));
ofs.write((char*) &(diff_index[0]), sizeof(uchar) * (n_bytes));
ofs.close();
return false;
}
// Comparator for a max-heap keyed on distance: the element with the largest
// distance (the current k-th best) sits on top of the priority_queue.
auto cmp_max = [](pair<float, uint>& a, pair<float, uint>& b) {
    return a.first < b.first;
};
// Decode the next 3-bit depth field from the bit-packed compressed stream.
// `bit_offset` is the global bit cursor (advanced by 3 on return); `buffer`
// holds the current BLKNBS-byte block and is refilled from `fp_codes`
// whenever the field straddles — or ends exactly on — a block boundary.
// Bits are assembled LSB-first via get_bit().
// NOTE(review): fread return values are not checked, so a short read near
// EOF would go unnoticed.
inline int get_depth_from_compressed_codes(uchar* buffer,
        long long& bit_offset, FILE* &fp_codes,
        long long max_n_bytes) {
    int depth = 0;
    // position of the cursor within the current block, in bits
    long long boffset = bit_offset % (BLKNBITS);
    // cout << "in depth boffset is " << boffset << " bit_offset is "
    // << bit_offset << endl;
    if (boffset + 3 > BLKNBITS) {
        // need to read a new block: the field is split across two blocks
        // cout << "in get_depth =========== A New Block needed ===========" << endl;
        int breakpoint = BLKNBITS - boffset; // bits left in this block
        for (int i = 0; i < breakpoint; i ++) {
            int val = get_bit(buffer, boffset+i);
            depth |= (val << i);
        }
        // fetch the next (possibly final, partial) block
        fread(buffer, sizeof(char),
                min((long long)BLKNBS,
                    max_n_bytes-(bit_offset+breakpoint)/BYTELEN),
                fp_codes);
        // remaining bits come from the start of the fresh block
        for (int i = 0; i < 3 - breakpoint; i ++) {
            int val = get_bit(buffer, i);
            depth |= (val << (i+breakpoint));
        }
        bit_offset += 3;
        return depth;
    }
    // common case: all 3 bits live in the current block
    for (int i = 0; i < 3; i ++) {
        int val = get_bit(buffer, boffset+i);
        depth |= (val << i);
    }
    bit_offset += 3;
    if (boffset + 3 == BLKNBITS) {
        // field ended exactly on the block boundary: prefetch the next block
        // cout << "in get_depth =========== A New Block needed after read ==========="
        // << "block id " << (bit_offset+3) / (BLKNBITS)<< endl;
        if (bit_offset < max_n_bytes*BYTELEN) {
            fread(buffer, sizeof(char),
                    min((long long)BLKNBS, max_n_bytes-(bit_offset/BYTELEN)),
                    fp_codes);
        }
    }
    return depth;
}
// Decode the next 8-bit diff bitmap from the bit-packed compressed stream.
// Same block-buffering protocol as get_depth_from_compressed_codes: refill
// `buffer` from `fp_codes` when the field straddles or exactly ends on a
// BLKNBITS boundary. Bits are assembled LSB-first.
// NOTE(review): fread return values are not checked.
inline int get_bitmap_from_compressed_codes(uchar* buffer,
        long long& bit_offset, FILE* &fp_codes,
        long long max_n_bytes) {
    int bitmap= 0;
    // position of the cursor within the current block, in bits
    long long boffset = bit_offset % (BLKNBITS);
    // cout << "in bitmap boffset is " << boffset << " bit_offset is "
    // << bit_offset << endl;
    if (boffset + BYTELEN > BLKNBITS) {
        // need to read a new block: the field is split across two blocks
        // cout << "in get_bitmap =========== A New Block needed ==========="
        // << "block id " << (bit_offset+BYTELEN) / (BLKNBITS)<< endl;
        int breakpoint = BLKNBITS - boffset; // bits left in this block
        for (int i = 0; i < breakpoint; i ++) {
            int val = get_bit(buffer, boffset+i);
            bitmap |= (val << i);
        }
        fread(buffer, sizeof(char),
                min((long long)BLKNBS,
                    max_n_bytes-(bit_offset+breakpoint)/BYTELEN),
                fp_codes);
        // remaining bits come from the start of the fresh block
        for (int i = 0; i < BYTELEN - breakpoint; i ++) {
            int val = get_bit(buffer, i);
            bitmap |= (val << (i+breakpoint));
        }
        bit_offset += BYTELEN;
        return bitmap;
    }
    // common case: the whole byte lives in the current block
    for (int i = 0; i < BYTELEN; i ++) {
        int val = get_bit(buffer, boffset+i);
        bitmap |= (val << i);
    }
    bit_offset += BYTELEN;
    if (boffset + BYTELEN == BLKNBITS) {
        // field ended exactly on the block boundary: prefetch the next block
        // cout << "int get_bitmap =========== A New Block needed after read ==========="
        // << "block id " << (bit_offset+BYTELEN) / (BLKNBITS)<< endl;
        if (bit_offset < max_n_bytes*BYTELEN) {
            fread(buffer, sizeof(char),
                    min((long long)BLKNBS, max_n_bytes-(bit_offset/BYTELEN)),
                    fp_codes);
        }
    }
    return bitmap;
}
// Decode the next 8-bit centroid id from the bit-packed compressed stream.
// Identical block-buffering protocol to get_bitmap_from_compressed_codes.
// NOTE(review): fread return values are not checked.
inline int get_cid_from_compressed_codes(uchar* buffer,
        long long& bit_offset, FILE* &fp_codes,
        long long max_n_bytes) {
    int cid = 0;
    // position of the cursor within the current block, in bits
    long long boffset = bit_offset % (BLKNBITS);
    // cout << "in cid boffset is " << boffset << " bit_offset is "
    // << bit_offset << endl;
    if (boffset + BYTELEN > BLKNBITS) {
        //INFO
        // cout << "int get_cid =========== A New Block needed ==========="
        // << "block id " << (bit_offset+BYTELEN) / (BLKNBITS)<< endl;
        // need to read a new block: the field is split across two blocks
        int breakpoint = BLKNBITS - boffset; // bits left in this block
        for (int i = 0; i < breakpoint; i ++) {
            int val = get_bit(buffer, boffset+i);
            cid |= (val << i);
        }
        long long nbyte_read = min((long long)BLKNBS,
                max_n_bytes-(bit_offset+breakpoint)/BYTELEN);
        // cout << nbyte_read << " read " << endl;
        fread(buffer, sizeof(char), nbyte_read, fp_codes);
        // remaining bits come from the start of the fresh block
        for (int i = 0; i < BYTELEN - breakpoint; i ++) {
            int val = get_bit(buffer, i);
            cid |= (val << (i+breakpoint));
        }
        // //INFO
        // for (int i = 0; i < 20; i ++) {
        // cout << bitset<8>(buffer[i]) << endl;
        // }
        // // END OF INFO
        bit_offset += BYTELEN;
        return cid;
    }
    // common case: the whole byte lives in the current block
    for (int i = 0; i < BYTELEN; i ++) {
        int val = get_bit(buffer, boffset+i);
        cid |= (val << i);
    }
    bit_offset += BYTELEN;
    if (boffset + BYTELEN == BLKNBITS) {
        // field ended exactly on the block boundary: prefetch the next block
        // cout << "int get_cid =========== A New Block needed after read ==========="
        // << "block id " << (bit_offset+BYTELEN) / (BLKNBITS)<< endl;
        if (bit_offset < max_n_bytes*BYTELEN) {
            fread(buffer, sizeof(char),
                    min((long long)BLKNBS, max_n_bytes-(bit_offset/BYTELEN)),
                    fp_codes);
        }
    }
    return cid;
}
/*
 * Top-k nearest-code scan over the bit-packed compressed code file
 * <dataset_path>/M<M>K<K>_Approx_compressed_codes_N<num_codes>.
 * Each node stores a 3-bit depth, an 8-bit diff bitmap and one byte per
 * changed centroid id; a per-depth stack of decoded codes/distances lets
 * each node's distance be derived incrementally from its parent.
 * Results (id, distance) are written best-first into results[0..top_k-1].
 * `decoder[bitmap]` maps a bitmap byte to {count, changed positions...}.
 */
void query_processing_scan_compressed_codes(const string &dataset_path,
        const vector<float> &query, int top_k, int M, int K,
        int m_Ds, uint num_codes,
        const vector<PQ::Array> &m_codewords,
        vector<pair<int, float>> &results, uchar** decoder)
{
    string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
        + "_Approx_compressed_codes";
    file_name = file_name + "_N" + to_string(num_codes);
    FILE* fp_codes;
    uchar* buffer = new uchar[BLKNBS];
    cout << file_name << endl;
    fp_codes = fopen(file_name.c_str(), "r");
    if (fp_codes == NULL) { // fix: fopen result was not checked before fread
        cerr << "Error: cannot open " << file_name << endl;
        delete[] buffer;
        return;
    }
    // header: total number of payload bits
    fread(buffer, sizeof(char), sizeof(long long), fp_codes);
    long long n_bits = ((long long*)buffer)[0];
    long long n_bytes = n_bits/8 + ((n_bits%8==0) ? 0 : 1);
    // (re)build the query-to-centroid distance lookup table
    if (m_sub_distances == NULL) {
        m_sub_distances = new float*[PQ_M]; // m_sub_distances defined in .h file
        for (int i = 0; i < PQ_M; i++) {
            m_sub_distances[i] = new float[PQ_K];
            memset(m_sub_distances[i], 0, sizeof(float)*PQ_K);
        }
    }
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < K; j ++) {
            m_sub_distances[i][j] = .0;
            for (int k = 0; k < m_Ds; k ++) {
                m_sub_distances[i][j] += pow(m_codewords[i][j][k]
                        - query[i*m_Ds+k], 2);
            }
        }
    }
    // max-heap over (distance, id): top() is the current k-th best
    priority_queue<pair<float, uint>,
        vector<pair<float, uint>>,
        decltype(cmp_max)> max_heap(cmp_max);
    // get first page of data
    fread(buffer, sizeof(char), BLKNBS, fp_codes);
    long long bit_offset=0;
    double qdist = 0;
    // per-depth stacks: decoded code and distance of the latest node at
    // each tree depth
    vector<vector<uchar>> vecs_stack(M, vector<uchar>(M, 0));
    vector<double> dists_stack(M,0);
    bit_offset += 11; // the first cid starts from offset 11
    // decode the root code and its distance
    for (int m = 0; m < M; m ++) {
        uchar cid = get_cid_from_compressed_codes(buffer, bit_offset,
                fp_codes, n_bytes);
        qdist += m_sub_distances[m][cid];
        vecs_stack[0][m] = cid;
    }
    dists_stack[0] = qdist;
    max_heap.push(make_pair(qdist, 0));
    int last_depth = 0;
    for (long i = 1; i < num_codes; i ++) {
        int depth = get_depth_from_compressed_codes(buffer,
                bit_offset, fp_codes, n_bytes);
        // sanity check: pre-order depth may only grow one level at a time
        if (last_depth < depth) {
            if (last_depth + 1 != depth) {
                cout << last_depth << " " << depth << endl;
                exit(0);
            }
        }
        last_depth = depth;
        double dist = dists_stack[depth-1];
        uchar bitmap = get_bitmap_from_compressed_codes(buffer,
                bit_offset, fp_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        // inherit the parent's code, then patch only the changed entries
        for (int m = 0; m < M; m ++)
            vecs_stack[depth][m] = vecs_stack[depth-1][m];
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_cid_from_compressed_codes(buffer,
                    bit_offset, fp_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            // incremental distance update: swap out the parent's term
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        dists_stack[depth] = dist;
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i);
        }
    }
    fclose(fp_codes); // fix: stream was never closed
    delete[] buffer;  // fix: block buffer was leaked
    // drain the heap worst-first so results come out best-first
    for (int i = top_k-1; i >= 0; i --) {
        if (max_heap.empty()) break; // fewer than top_k codes scanned
        const pair<float, uint>& top = max_heap.top();
        results[i].first = top.second;
        results[i].second= top.first;
        max_heap.pop();
    }
}
// Return the next PQ code byte from the block buffer, refilling it from
// `fp_codes` once the current BLKNBS-byte block has been fully consumed
// (only while bytes remain in the file). Advances `byte_offset` by one.
inline int get_cid_from_pqcodes(uchar* buffer,
        long long& byte_offset, FILE* &fp_codes,
        long long max_n_bytes) {
    const long long in_block = byte_offset % BLKNBS;
    const int cid = buffer[in_block];
    ++byte_offset;
    // the byte just consumed was the last of this block: fetch the next
    // (possibly final, partial) block
    if (in_block + 1 == BLKNBS && byte_offset < max_n_bytes) {
        fread(buffer, sizeof(char),
                min((long long)BLKNBS, max_n_bytes-byte_offset),
                fp_codes);
    }
    return cid;
}
/*
 * Exhaustive top-k scan over the plain (uncompressed) PQ code file
 * "codes.bin.plain.M<M>K<K>". The first 8 bytes of the file hold the code
 * count, which overrides the num_codes argument (passed by value).
 * For every code the asymmetric distance to `query` is accumulated from the
 * per-sub-quantizer lookup table; a bounded max-heap keeps the top_k closest
 * ids, written best-first into results[0..top_k-1].
 */
void query_processing_scan_pqcodes(const string &dataset_path,
        const vector<float> &query, int top_k, int M, int K,
        int m_Ds, uint num_codes,
        const vector<PQ::Array> &m_codewords,
        vector<pair<int, float>> &results)
{
    string file_name = dataset_path+"/codes.bin.plain.M"+to_string(M)
        +"K"+to_string(K);
    FILE* fp_codes;
    uchar* buffer = new uchar[BLKNBS];
    // cout << file_name << endl;
    fp_codes = fopen(file_name.c_str(), "r");
    if (fp_codes == NULL) { // fix: fopen result was not checked before fread
        cerr << "Error: cannot open " << file_name << endl;
        delete[] buffer;
        return;
    }
    // header: actual number of codes in the file
    fread(buffer, sizeof(char), sizeof(long long), fp_codes);
    num_codes = ((long long*)buffer)[0];
    // (re)build the query-to-centroid distance lookup table
    if (m_sub_distances == NULL) {
        m_sub_distances = new float*[PQ_M]; // m_sub_distances defined in .h file
        for (int i = 0; i < PQ_M; i++) {
            m_sub_distances[i] = new float[PQ_K];
            memset(m_sub_distances[i], 0, sizeof(float)*PQ_K);
        }
    }
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < K; j ++) {
            m_sub_distances[i][j] = .0;
            for (int k = 0; k < m_Ds; k ++) {
                m_sub_distances[i][j] += pow(m_codewords[i][j][k]
                        - query[i*m_Ds+k], 2);
            }
        }
    }
    // max-heap over (distance, id): top() is the current k-th best
    priority_queue<pair<float, uint>,
        vector<pair<float, uint>>,
        decltype(cmp_max)> max_heap(cmp_max);
    // get first page of data (codes start right after the 8-byte header)
    fread(buffer, sizeof(char), BLKNBS, fp_codes);
    long long n_bytes = ((long long)num_codes) * M;
    long long byte_offset = 0;
    for (long i = 0; i < num_codes; i ++) {
        // accumulate the asymmetric distance over all M sub-quantizers
        float dist = 0;
        for (int m = 0; m < M; m ++) {
            uchar cid = get_cid_from_pqcodes(buffer,
                    byte_offset, fp_codes, n_bytes);
            dist += m_sub_distances[m][cid];
        }
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i);
        }
    }
    fclose(fp_codes); // fix: stream was never closed
    delete[] buffer;  // fix: block buffer was leaked
    // drain the heap worst-first so results come out best-first
    for (int i = top_k-1; i >= 0; i --) {
        if (max_heap.empty()) break; // fewer than top_k codes scanned
        const pair<float, uint>& top = max_heap.top();
        results[i].first = top.second;
        results[i].second= top.first;
        max_heap.pop();
    }
}
//=========================== efficient decoding ========================
// this works for the compression with two 4-bit-depths in one byte
// Fetch the next whole byte from the byte-aligned ("opt") compressed
// stream, refilling `buffer` with the next block from `fp_codes` once the
// current BLKNBS-byte block is exhausted (only while bytes remain).
// NOTE(review): the fread return value is not checked.
inline int get_byte_from_compressed_codes(uchar* buffer,
        long long& byte_offset, FILE* &fp_codes,
        long long max_n_bytes) {
    int cid = 0;
    // position of the cursor within the current block, in bytes
    long long boffset = byte_offset % (BLKNBS);
    // cout << "in cid boffset is " << boffset << " bit_offset is "
    // << bit_offset << endl;
    cid = buffer[boffset];
    byte_offset += 1;
    if (boffset + 1 == BLKNBS) {
        // the byte just consumed was the last of this block
        // cout << "int get_cid =========== A New Block needed after read ==========="
        // << "block id " << (byte_offset) / (BLKNBS)<< endl;
        if (byte_offset < max_n_bytes) {
            fread(buffer, sizeof(char),
                    min((long long)BLKNBS, max_n_bytes-byte_offset),
                    fp_codes);
        }
    }
    return cid;
}
/*
 * Top-k scan over the byte-aligned "opt" compressed code file produced by
 * qnodes_to_compressed_codes_opt. Nodes are stored in pairs: one byte packs
 * the two 4-bit depths, then each node contributes a diff bitmap byte plus
 * one byte per changed sub-quantizer. Distances are updated incrementally
 * against a per-depth stack of decoded codes.
 * Passing num_codes == (uint)-1 scans all codes recorded in the file header.
 * Results (id, distance) are written best-first into results[0..top_k-1].
 */
void query_processing_scan_compressed_codes_opt(const string &dataset_path,
        const vector<float> &query, int top_k, int M, int K,
        int m_Ds, uint num_codes,
        const vector<PQ::Array> &m_codewords,
        vector<pair<int, float>> &results, uchar** decoder)
{
    string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
        + "_Approx_compressed_codes_opt";
    file_name = file_name + "_N" + to_string(num_codes);
    FILE* fp_codes;
    uchar* buffer = new uchar[BLKNBS];
    cout << file_name << endl;
    fp_codes = fopen(file_name.c_str(), "r");
    if (fp_codes == NULL) { // fix: fopen result was not checked before fread
        cerr << "Error: cannot open " << file_name << endl;
        delete[] buffer;
        return;
    }
    // header: [n_codes][n_bytes]
    fread(buffer, sizeof(char), sizeof(long long)*2, fp_codes);
    long long n_codes = ((long long*)buffer)[0];
    long long n_bytes = ((long long*)buffer)[1];
    if (num_codes == -1) num_codes = n_codes; // (uint)-1 sentinel: scan all
    else if (num_codes != n_codes) {
        cout << "scan only part of the codes " << num_codes << " / "
            << n_codes << endl;
    }
    // (re)build the query-to-centroid distance lookup table
    if (m_sub_distances == NULL) {
        m_sub_distances = new float*[PQ_M]; // m_sub_distances defined in .h file
        for (int i = 0; i < PQ_M; i++) {
            m_sub_distances[i] = new float[PQ_K];
            memset(m_sub_distances[i], 0, sizeof(float)*PQ_K);
        }
    }
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < K; j ++) {
            m_sub_distances[i][j] = .0;
            for (int k = 0; k < m_Ds; k ++) {
                m_sub_distances[i][j] += pow(m_codewords[i][j][k]
                        - query[i*m_Ds+k], 2);
            }
        }
    }
    // max-heap over (distance, id): top() is the current k-th best
    priority_queue<pair<float, uint>,
        vector<pair<float, uint>>,
        decltype(cmp_max)> max_heap(cmp_max);
    // get first page of data
    fread(buffer, sizeof(char), BLKNBS, fp_codes);
    long long byte_offset=0;
    double qdist = 0;
    // per-depth stack of decoded codes, stored contiguously: row d holds the
    // code of the most recent node at tree depth d
    uchar* stacks = new uchar[M*M];
    uchar** vecs_stack = new uchar*[M];
    for (int i = 0; i < M; i ++) {
        vecs_stack[i] = stacks+i*M;
    }
    vector<double> dists_stack(M,0);
    // decode the root code and its distance
    for (int m = 0; m < M; m ++) {
        uchar cid = get_byte_from_compressed_codes(buffer, byte_offset,
                fp_codes, n_bytes);
        qdist += m_sub_distances[m][cid];
        vecs_stack[0][m] = cid;
    }
    dists_stack[0] = qdist;
    max_heap.push(make_pair(qdist, 0));
    long i = 1;
    for (; i+1 < num_codes; i = i + 2) {
        // one byte packs the depths of the next two nodes (4 bits each)
        int depths = get_byte_from_compressed_codes(buffer,
                byte_offset, fp_codes, n_bytes);
        // -------------- PROCESS the first code in this pair
        // NOTE(review): mask 7 keeps only 3 of the 4 stored depth bits —
        // safe only while depths stay below 8 (the stack has M rows).
        int depth = depths & 7;
        // inherit the parent's code with one 8-byte copy (assumes M == 8)
        ((long long*)vecs_stack[depth])[0] = ((long long*)vecs_stack[depth-1])[0];
        double dist = dists_stack[depth-1];
        uchar bitmap = get_byte_from_compressed_codes(buffer,
                byte_offset, fp_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        // patch only the changed sub-quantizers, updating the distance
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fp_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        dists_stack[depth] = dist;
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i);
        }
        // -------------- PROCESS the second code in this pair
        depth = (depths>>4) & 7;
        ((long long*)vecs_stack[depth])[0] = ((long long*)vecs_stack[depth-1])[0];
        dist = dists_stack[depth-1];
        bitmap = get_byte_from_compressed_codes(buffer,
                byte_offset, fp_codes, n_bytes);
        n_diff = decoder[bitmap][0];
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fp_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        dists_stack[depth] = dist;
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i+1);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i+1);
        }
    }
    if (i == num_codes - 1) {
        // odd node count: one node left; its depth takes a full byte
        int depth = get_byte_from_compressed_codes(buffer, byte_offset,
                fp_codes, n_bytes);
        #pragma simd
        for (int m = 0; m < M; m ++)
            vecs_stack[depth][m] = vecs_stack[depth-1][m];
        double dist = dists_stack[depth-1];
        uchar bitmap = get_byte_from_compressed_codes(buffer, byte_offset,
                fp_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fp_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        // fix: the leftover node's id is i (== num_codes-1); the previous
        // code reported i+1, one past the last valid id
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i);
        }
    }
    fclose(fp_codes); // fix: stream was never closed
    delete[] buffer;  // fix: these three buffers were leaked
    delete[] vecs_stack;
    delete[] stacks;
    // drain the heap worst-first so results come out best-first
    for (int i = top_k-1; i >= 0; i --) {
        if (max_heap.empty()) break; // fewer than top_k codes scanned
        const pair<float, uint>& top = max_heap.top();
        results[i].first = top.second;
        results[i].second= top.first;
        max_heap.pop();
    }
}
// ========================= O_Direct =======================
// O_DIRECT overload of get_cid_from_pqcodes: same single-byte protocol, but
// the refill uses read(2) on a raw file descriptor with the block-aligned
// buffer required by O_DIRECT.
// NOTE(review): the read() return value is not checked.
inline int get_cid_from_pqcodes(uchar* buffer,
        long long& byte_offset, int& fd_codes,
        long long max_n_bytes) {
    int cid = 0;
    // position of the cursor within the current block, in bytes
    long long boffset = byte_offset % (BLKNBS);
    // cout << "in cid boffset is " << boffset << " bit_offset is "
    // << bit_offset << endl;
    cid = buffer[boffset];
    byte_offset += 1;
    if (boffset + 1 == BLKNBS) {
        // the byte just consumed was the last of this block
        // cout << "int get_cid =========== A New Block needed after read ==========="
        // << "block id " << (byte_offset) / (BLKNBS)<< endl;
        if (byte_offset < max_n_bytes) {
            // lseek (fd_codes, BLKNBS, SEEK_CUR);
            read(fd_codes, buffer,
                    BLKNBS);
        }
    }
    return cid;
}
/*
 * O_DIRECT variant of the plain PQ-code scan: reads the code file with
 * direct I/O into a block-aligned buffer, bypassing the page cache. The
 * first 8 bytes of the file hold the code count, which overrides the
 * num_codes argument (passed by value). Top-k closest ids are written
 * best-first into results[0..top_k-1].
 */
void query_processing_scan_pqcodes_o_direct(const string &dataset_path,
        const vector<float> &query, int top_k, int M, int K,
        int m_Ds, uint num_codes,
        const vector<PQ::Array> &m_codewords,
        vector<pair<int, float>> &results)
{
    string file_name = dataset_path+"/codes.bin.plain.M"+to_string(M)
        +"K"+to_string(K);
    // O_DIRECT requires the user buffer to be block-aligned
    uchar* buffer = (uchar*)aligned_alloc(BLKNBS, BLKNBS);
    memset(buffer, 0, BLKNBS);
    int fd_codes = open(file_name.c_str(), O_DIRECT|O_RDONLY);
    if (fd_codes < 0) {
        cout << "cannot open file " << file_name << endl;
        free(buffer); // fix: bail out instead of reading from fd -1
        return;
    }
    // first block contains the 8-byte header followed by code bytes
    int bytes_read = read(fd_codes, buffer, BLKNBS);
    if (bytes_read < 0) {
        cout << "bytes read " << bytes_read << endl;
        cout << "errno is " << errno << " " << strerror(errno) << endl;
    }
    num_codes = ((long long*)buffer)[0];
    // (re)build the query-to-centroid distance lookup table
    if (m_sub_distances == NULL) {
        m_sub_distances = new float*[PQ_M]; // m_sub_distances defined in .h file
        for (int i = 0; i < PQ_M; i++) {
            m_sub_distances[i] = new float[PQ_K];
            memset(m_sub_distances[i], 0, sizeof(float)*PQ_K);
        }
    }
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < K; j ++) {
            m_sub_distances[i][j] = .0;
            for (int k = 0; k < m_Ds; k ++) {
                m_sub_distances[i][j] += pow(m_codewords[i][j][k]
                        - query[i*m_Ds+k], 2);
            }
        }
    }
    // max-heap over (distance, id): top() is the current k-th best
    priority_queue<pair<float, uint>,
        vector<pair<float, uint>>,
        decltype(cmp_max)> max_heap(cmp_max);
    // NOTE(review): byte_offset includes the 8-byte header but n_bytes does
    // not; the refill bound in get_cid_from_pqcodes is therefore 8 bytes
    // tight — confirm the last block is still fetched for all file sizes.
    long long n_bytes = ((long long)num_codes) * M;
    long long byte_offset = 8; // skip the header inside the first block
    for (long i = 0; i < num_codes; i ++) {
        // accumulate the asymmetric distance over all M sub-quantizers
        float dist = 0;
        for (int m = 0; m < M; m ++) {
            uchar cid = get_cid_from_pqcodes(buffer,
                    byte_offset, fd_codes, n_bytes);
            dist += m_sub_distances[m][cid];
        }
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i);
        }
    }
    close(fd_codes); // fix: descriptor was never closed
    free(buffer);    // fix: aligned buffer was leaked
    // drain the heap worst-first so results come out best-first
    for (int i = top_k-1; i >= 0; i --) {
        if (max_heap.empty()) break; // fewer than top_k codes scanned
        const pair<float, uint>& top = max_heap.top();
        results[i].first = top.second;
        results[i].second= top.first;
        max_heap.pop();
    }
}
// Return the next byte from the O_DIRECT-backed block buffer, refilling it
// with read(2) once the current BLKNBS-byte block is exhausted (only while
// bytes remain in the file). Advances `byte_offset` by one.
// NOTE(review): the read() return value is not checked.
inline uchar get_byte(uchar* buffer,
        long long& byte_offset, int& fd_codes,
        long long max_n_bytes) {
    uchar byte = 0;
    // position of the cursor within the current block, in bytes
    long long boffset = byte_offset % (BLKNBS);
    // cout << "in cid boffset is " << boffset << " bit_offset is "
    // << bit_offset << endl;
    byte = buffer[boffset];
    byte_offset += 1;
    if (boffset + 1 == BLKNBS) {
        // the byte just consumed was the last of this block
        // cout << "int get_cid =========== A New Block needed after read ==========="
        // << "block id " << (byte_offset) / (BLKNBS)<< endl;
        if (byte_offset < max_n_bytes) {
            // lseek (fd_codes, BLKNBS, SEEK_CUR);
            read(fd_codes, buffer,
                    BLKNBS);
        }
    }
    return byte;
}
/*
 * O_DIRECT top-k scan over the flat diff index built by create_diff_index.
 * File layout: [n_codes:int64][n_bytes:int64][root code (M bytes)] then per
 * code a bitmap byte plus one byte per changed centroid id. The running
 * distance is carried from one code to the next (delta decoding), so only
 * the changed sub-quantizers are touched per step. Top-k closest ids are
 * written best-first into results[0..top_k-1].
 * `decoder[bitmap]` maps a bitmap byte to {count, changed positions...}.
 */
void query_processing_diff_scan_o_direct(const string &dataset_path,
        const vector<float> &query, int top_k, int M, int K,
        int m_Ds, uint num_codes,
        const vector<PQ::Array> &m_codewords,
        vector<pair<int, float>> &results, uchar** decoder)
{
    string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
        + "_diff_index";
    file_name = file_name + "_N" + to_string(num_codes);
    // O_DIRECT requires the user buffer to be block-aligned
    uchar* buffer = (uchar*)aligned_alloc(BLKNBS, BLKNBS);
    memset(buffer, 0, BLKNBS);
    int fd_codes = open(file_name.c_str(), O_DIRECT|O_RDONLY);
    if (fd_codes < 0) {
        cout << "cannot open file " << file_name << endl;
        free(buffer); // fix: bail out instead of reading from fd -1
        return;
    }
    // first block contains the 16-byte header followed by the payload
    int bytes_read = read(fd_codes, buffer, BLKNBS);
    if (bytes_read < 0) {
        cout << "bytes read " << bytes_read << endl;
        cout << "errno is " << errno << " " << strerror(errno) << endl;
    }
    num_codes = ((long long*)buffer)[0];
    long long n_bytes = ((long long*)buffer)[1];
    // (re)build the query-to-centroid distance lookup table
    if (m_sub_distances == NULL) {
        m_sub_distances = new float*[PQ_M]; // m_sub_distances defined in .h file
        for (int i = 0; i < PQ_M; i++) {
            m_sub_distances[i] = new float[PQ_K];
            memset(m_sub_distances[i], 0, sizeof(float)*PQ_K);
        }
    }
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < K; j ++) {
            m_sub_distances[i][j] = .0;
            for (int k = 0; k < m_Ds; k ++) {
                m_sub_distances[i][j] += pow(m_codewords[i][j][k]
                        - query[i*m_Ds+k], 2);
            }
        }
    }
    // max-heap over (distance, id): top() is the current k-th best
    priority_queue<pair<float, uint>,
        vector<pair<float, uint>>,
        decltype(cmp_max)> max_heap(cmp_max);
    long long byte_offset = 16; // skip the two 8-byte header fields
    uchar* vec = new uchar[M];  // current fully-decoded code
    double qdist = 0;
    // decode the root code and its full distance
    for (int m = 0; m < M; m ++) {
        uchar cid = get_byte(buffer, byte_offset, fd_codes, n_bytes);
        vec[m] = cid;
        qdist += m_sub_distances[m][cid];
    }
    max_heap.emplace(qdist, 0);
    for (long i = 1; i < num_codes; i ++) {
        uchar bitmap = get_byte(buffer, byte_offset, fd_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        // apply only the diffs to the running code and distance
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte(buffer, byte_offset, fd_codes, n_bytes);
            uchar from = vec[m];
            qdist -= m_sub_distances[m][from];
            qdist += m_sub_distances[m][cid];
            vec[m] = cid;
        }
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(qdist, i);
        } else if (qdist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(qdist, i);
        }
    }
    close(fd_codes); // fix: descriptor was never closed
    free(buffer);    // fix: aligned buffer was leaked
    delete[] vec;    // fix: scratch code vector was leaked
    // drain the heap worst-first so results come out best-first
    for (int i = top_k-1; i >= 0; i --) {
        if (max_heap.empty()) break; // fewer than top_k codes scanned
        const pair<float, uint>& top = max_heap.top();
        results[i].first = top.second;
        results[i].second= top.first;
        max_heap.pop();
    }
}
// Returns the next byte of the compressed-code stream.  `buffer` is one
// O_DIRECT-aligned block of size BLKNBS; when the last byte of the block is
// consumed, the next block is prefetched from fd_codes (unless the stream
// end, max_n_bytes, has been reached).  byte_offset is the absolute cursor
// into the file and is advanced by one.
inline int get_byte_from_compressed_codes(uchar* buffer,
long long& byte_offset, int &fd_codes,
long long max_n_bytes) {
    const long long pos_in_block = byte_offset % (BLKNBS);
    const int value = buffer[pos_in_block];
    byte_offset += 1;
    // Block exhausted: refill from the file, but only while payload remains.
    if (pos_in_block + 1 == BLKNBS && byte_offset < max_n_bytes) {
        // The final block of the file may be short; the caller bounds all
        // reads by max_n_bytes, so a short read here is harmless.
        int n = read(fd_codes, buffer, BLKNBS);
        (void)n;
    }
    return value;
}
/*
 * Scan diff-encoded, depth-stacked PQ codes from an O_DIRECT file and
 * collect the top_k codes nearest to `query` (squared L2 distance).
 *
 * File layout: 16-byte header {n_codes, n_bytes}, then code 0 verbatim
 * (M codeword ids), then pairs of codes each introduced by one byte that
 * packs their two stack depths (low/high nibble), a diff bitmap, and the
 * changed codeword ids.  decoder[bitmap] = {n_diff, m_0, m_1, ...}.
 *
 * Results: results[i] = (code index, distance), nearest first.
 * num_codes == (uint)-1 means "scan everything in the file".
 */
void query_processing_scan_compressed_codes_opt_o_direct(
    const string &dataset_path,
    const vector<float> &query, int top_k, int M, int K,
    int m_Ds, uint num_codes,
    const vector<PQ::Array> &m_codewords,
    vector<pair<int, float>> &results, uchar** decoder)
{
    string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
        + "_Approx_compressed_codes_opt";
    file_name = file_name + "_N" + to_string(num_codes);
    uchar* buffer = (uchar*)aligned_alloc(BLKNBS, BLKNBS);
    cout << file_name << endl;
    int fd_codes = open(file_name.c_str(), O_DIRECT|O_RDONLY);
    if (fd_codes < 0) {
        cout << "cannot open file " << file_name << endl;
        free(buffer);
        return;  // FIX: previously fell through and read from an invalid fd
    }
    int byte_read = read(fd_codes, buffer, BLKNBS);
    if (byte_read < 0) {
        cout << "cannot read header: " << strerror(errno) << endl;
        close(fd_codes);
        free(buffer);
        return;
    }
    // Header: [0] number of codes, [1] number of payload bytes.
    long long n_codes = ((long long*)buffer)[0];
    long long n_bytes = ((long long*)buffer)[1];
    if (num_codes == (uint)-1) num_codes = n_codes;  // -1 sentinel: all codes
    else if (num_codes != n_codes) {
        cout << "scan only part of the codes " << num_codes << " / "
             << n_codes << endl;
    }
    // (Re)build the per-subspace distance lookup table for this query.
    if (m_sub_distances == NULL) {
        m_sub_distances = new float*[PQ_M]; // m_sub_distances defined in .h file
        for (int i = 0; i < PQ_M; i++) {
            m_sub_distances[i] = new float[PQ_K];
            memset(m_sub_distances[i], 0, sizeof(float)*PQ_K);
        }
    }
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < K; j ++) {
            m_sub_distances[i][j] = .0;
            for (int k = 0; k < m_Ds; k ++) {
                m_sub_distances[i][j] += pow(m_codewords[i][j][k]
                        - query[i*m_Ds+k], 2);
            }
        }
    }
    // Max-heap keyed on distance: top() is the worst of the current top_k.
    priority_queue<pair<float, uint>,
        vector<pair<float, uint>>,
        decltype(cmp_max)> max_heap(cmp_max);
    long long byte_offset = 16;  // skip 16-byte header
    n_bytes += byte_offset;
    double qdist = 0;
    // vecs_stack[d] is the decoded code at stack depth d; dists_stack[d] its
    // distance, so a child at depth d patches depth d-1 incrementally.
    uchar* stacks = new uchar[M*M];
    uchar** vecs_stack = new uchar*[M];
    for (int i = 0; i < M; i ++) {
        vecs_stack[i] = stacks+i*M;
    }
    vector<double> dists_stack(M, 0);
    // Code 0 is stored verbatim.
    for (int m = 0; m < M; m ++) {
        uchar cid = get_byte_from_compressed_codes(buffer, byte_offset,
                fd_codes, n_bytes);
        qdist += m_sub_distances[m][cid];
        vecs_stack[0][m] = cid;
    }
    dists_stack[0] = qdist;
    max_heap.push(make_pair(qdist, 0));
    long i = 1;
    for (; i+1 < num_codes; i = i + 2) {
        // One byte packs the stack depths of the next two codes.
        int depths = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        // -------------- PROCESS the first code in this pair (position i)
        int depth = depths & 7;
        // Copy parent vector in one 8-byte move (assumes M <= 8).
        // NOTE(review): type-puns uchar[] as long long -- confirm alignment
        // and aliasing are acceptable on the target platform.
        ((long long*)vecs_stack[depth])[0] = ((long long*)vecs_stack[depth-1])[0];
        double dist = dists_stack[depth-1];
        uchar bitmap = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        // Patch only the subspaces that differ from the parent.
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fd_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        dists_stack[depth] = dist;
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i);
        }
        // -------------- PROCESS the second code in this pair (position i+1)
        depth = (depths>>4) & 7;
        ((long long*)vecs_stack[depth])[0] = ((long long*)vecs_stack[depth-1])[0];
        dist = dists_stack[depth-1];
        bitmap = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        n_diff = decoder[bitmap][0];
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fd_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        dists_stack[depth] = dist;
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i+1);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i+1);
        }
    }
    if (i == num_codes - 1) {
        // Odd tail: exactly one code (position i) remains.
        int depth = get_byte_from_compressed_codes(buffer, byte_offset,
                fd_codes, n_bytes);
#pragma simd
        for (int m = 0; m < M; m ++)
            vecs_stack[depth][m] = vecs_stack[depth-1][m];
        double dist = dists_stack[depth-1];
        uchar bitmap = get_byte_from_compressed_codes(buffer, byte_offset,
                fd_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fd_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i);  // FIX: was i+1 (== num_codes, out of range)
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i);  // FIX: was i+1
        }
    }
    // Emit nearest-first; guard against fewer than top_k scanned codes
    // (popping an empty priority_queue is undefined behavior).
    for (int r = (int)max_heap.size() - 1; r >= 0; r --) {
        const pair<float, uint>& top = max_heap.top();
        results[r].first = top.second;
        results[r].second = top.first;
        max_heap.pop();
    }
    // FIX: release resources (fd, aligned buffer, stacks were leaked).
    delete[] vecs_stack;
    delete[] stacks;
    close(fd_codes);
    free(buffer);
}
// Comparator ordering (distance, index) pairs by their distance component.
// Used with std::priority_queue, a "less-than" comparator yields a MAX-heap:
// top() is the pair with the LARGEST distance, i.e. the current worst hit.
struct GreaterByDist
{
    bool operator()(const std::pair<float,uint>& a,
                    const std::pair<float,uint>& b) const
    {
        return b.first > a.first;
    }
};
/*
 * Batch variant of query_processing_scan_compressed_codes_opt_o_direct():
 * ONE sequential O_DIRECT scan of the compressed code file answers every
 * query in `queries` at once.  Per-query distance tables, decode stacks and
 * top_k max-heaps are kept side by side and updated as each code is decoded.
 *
 * results[q] receives the top_k (code index, distance) pairs for query q,
 * nearest first.  decoder[bitmap] = {n_diff, m_0, m_1, ...}.
 */
void query_processing_batch_scan_compressed_codes_opt_o_direct(
    const string &dataset_path,
    const vector<vector<float>> &queries, int top_k, int M, int K,
    int m_Ds, uint num_codes,
    const vector<PQ::Array> &m_codewords,
    vector<vector<pair<int, float>>> &results, uchar** decoder)
{
    string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
        + "_Approx_compressed_codes_opt";
    file_name = file_name + "_N" + to_string(num_codes);
    uchar* buffer = (uchar*)aligned_alloc(BLKNBS, BLKNBS);
    cout << file_name << endl;
    int fd_codes = open(file_name.c_str(), O_DIRECT|O_RDONLY);
    if (fd_codes < 0) {
        cout << "cannot open file " << file_name << endl;
        free(buffer);
        return;  // FIX: previously fell through with an invalid fd
    }
    int byte_read = read(fd_codes, buffer, BLKNBS);
    if (byte_read < 0) {
        cout << "cannot read header: " << strerror(errno) << endl;
        close(fd_codes);
        free(buffer);
        return;
    }
    long long n_codes = ((long long*)buffer)[0];
    long long n_bytes = ((long long*)buffer)[1];
    if (num_codes == (uint)-1) num_codes = n_codes;  // -1 sentinel: all codes
    else if (num_codes != n_codes) {
        cout << "scan only part of the codes " << num_codes << " / "
             << n_codes << endl;
    }
    cout << "n_codes = " << n_codes << endl;
    cout << "n_bytes = " << n_bytes << endl;
    // Per-query distance lookup tables (lazily allocated once, reused after).
    if (batch_m_sub_distances == NULL) {
        batch_m_sub_distances = new float**[queries.size()];
        for (int q = 0; q < queries.size(); q ++) {
            batch_m_sub_distances[q] = new float*[PQ_M];
            for (int i = 0; i < PQ_M; i++) {
                batch_m_sub_distances[q][i] = new float[PQ_K];
                memset(batch_m_sub_distances[q][i], 0, sizeof(float)*PQ_K);
            }
        }
    }
    for (int q = 0; q < queries.size(); q ++) {
        const vector<float>& query = queries[q];
        for (int i = 0; i < M; i++) {
            for (int j = 0; j < K; j ++) {
                batch_m_sub_distances[q][i][j] = .0;
                for (int k = 0; k < m_Ds; k ++) {
                    batch_m_sub_distances[q][i][j] += pow(m_codewords[i][j][k]
                            - query[i*m_Ds+k], 2);
                }
            }
        }
    }
    // One top_k max-heap per query (GreaterByDist makes top() the worst hit).
    vector<priority_queue<pair<float, uint>,
        vector<pair<float, uint>>,
        GreaterByDist>> batch_max_heap(queries.size());
    long long byte_offset = 16;  // skip 16-byte header
    n_bytes += byte_offset;
    double * batch_qdist = new double[queries.size()];
    memset(batch_qdist, 0, sizeof(double)*queries.size());
    uchar* batch_stacks = new uchar[queries.size()*M*M];
    // FIX: was new uchar**[queries.size()*M] -- only queries.size() slots
    // are ever used, the rest was wasted allocation.
    uchar*** batch_vecs_stack = new uchar**[queries.size()];
    for (int q = 0; q < queries.size(); q ++) {
        batch_vecs_stack[q] = new uchar*[M];
        for (int i = 0; i < M; i ++) {
            batch_vecs_stack[q][i] = batch_stacks + q*M*M + i*M;
        }
    }
    vector<vector<double>> batch_dists_stack(queries.size());
    for (int q = 0; q < queries.size(); q ++) {
        batch_dists_stack[q].resize(M);
    }
    // Code 0 is stored verbatim.
    for (int m = 0; m < M; m ++) {
        uchar cid = get_byte_from_compressed_codes(buffer, byte_offset,
                fd_codes, n_bytes);
        for (int q = 0; q < queries.size(); q ++) {
            batch_qdist[q] += batch_m_sub_distances[q][m][cid];
            batch_vecs_stack[q][0][m] = cid;
        }
    }
    for (int q = 0; q < queries.size(); q ++) {
        batch_dists_stack[q][0] = batch_qdist[q];
        batch_max_heap[q].push(make_pair(batch_qdist[q], 0));
    }
    long i = 1;
    vector<float> batch_dist(queries.size());
    for (; i+1 < num_codes; i = i + 2) {
        // One byte packs the stack depths of the next two codes.
        int depths = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        // -------------- PROCESS the first code in this pair (position i)
        int depth = depths & 7;
        for (int q = 0; q < queries.size(); q ++) {
            // Copy parent vector in one 8-byte move (assumes M <= 8).
            ((long long*)batch_vecs_stack[q][depth])[0] =
                ((long long*)batch_vecs_stack[q][depth-1])[0];
            batch_dist[q] = batch_dists_stack[q][depth-1];
        }
        uchar bitmap = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        // Patch only the subspaces that differ from the parent.
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fd_codes, n_bytes);
            for (int q = 0; q < queries.size(); q++) {
                batch_vecs_stack[q][depth][m] = cid;
                uchar from = batch_vecs_stack[q][depth-1][m];
                batch_dist[q] -= batch_m_sub_distances[q][m][from];
                batch_dist[q] += batch_m_sub_distances[q][m][cid];
            }
        }
        for (int q = 0; q < queries.size(); q ++) {
            batch_dists_stack[q][depth] = batch_dist[q];
        }
        for (int q = 0; q < queries.size(); q ++) {
            if (batch_max_heap[q].size() < (size_t)top_k) {
                batch_max_heap[q].emplace(batch_dist[q], i);
            } else if (batch_dist[q] < batch_max_heap[q].top().first) {
                batch_max_heap[q].pop();
                batch_max_heap[q].emplace(batch_dist[q], i);
            }
        }
        // -------------- PROCESS the second code in this pair (position i+1)
        depth = (depths>>4) & 7;
        for (int q = 0; q < queries.size(); q ++) {
            ((long long*)batch_vecs_stack[q][depth])[0] =
                ((long long*)batch_vecs_stack[q][depth-1])[0];
            batch_dist[q] = batch_dists_stack[q][depth-1];
        }
        bitmap = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        n_diff = decoder[bitmap][0];
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fd_codes, n_bytes);
            for (int q = 0; q < queries.size(); q++) {
                batch_vecs_stack[q][depth][m] = cid;
                uchar from = batch_vecs_stack[q][depth-1][m];
                batch_dist[q] -= batch_m_sub_distances[q][m][from];
                batch_dist[q] += batch_m_sub_distances[q][m][cid];
            }
        }
        for (int q = 0; q < queries.size(); q ++) {
            batch_dists_stack[q][depth] = batch_dist[q];
        }
        for (int q = 0; q < queries.size(); q ++) {
            if (batch_max_heap[q].size() < (size_t)top_k) {
                // FIX: was i -- the second code of the pair sits at position
                // i+1 (the single-query version emplaces i+1 here).
                batch_max_heap[q].emplace(batch_dist[q], i+1);
            } else if (batch_dist[q] < batch_max_heap[q].top().first) {
                batch_max_heap[q].pop();
                batch_max_heap[q].emplace(batch_dist[q], i+1);  // FIX: was i
            }
        }
    }
    if (i == num_codes - 1) {
        // Odd tail: exactly one code (position i) remains.
        int depth = get_byte_from_compressed_codes(buffer, byte_offset,
                fd_codes, n_bytes);
        for (int q = 0; q < queries.size(); q ++) {
            ((long long*)batch_vecs_stack[q][depth])[0] =
                ((long long*)batch_vecs_stack[q][depth-1])[0];
            batch_dist[q] = batch_dists_stack[q][depth-1];
        }
        uchar bitmap = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fd_codes, n_bytes);
            for (int q = 0; q < queries.size(); q++) {
                batch_vecs_stack[q][depth][m] = cid;
                uchar from = batch_vecs_stack[q][depth-1][m];
                batch_dist[q] -= batch_m_sub_distances[q][m][from];
                batch_dist[q] += batch_m_sub_distances[q][m][cid];
            }
        }
        for (int q = 0; q < queries.size(); q ++) {
            batch_dists_stack[q][depth] = batch_dist[q];
        }
        for (int q = 0; q < queries.size(); q ++) {
            if (batch_max_heap[q].size() < (size_t)top_k) {
                batch_max_heap[q].emplace(batch_dist[q], i);
            } else if (batch_dist[q] < batch_max_heap[q].top().first) {
                batch_max_heap[q].pop();
                batch_max_heap[q].emplace(batch_dist[q], i);
            }
        }
    }
    // Emit nearest-first; guard against heaps smaller than top_k.
    for (int q = 0; q < queries.size(); q ++) {
        for (int r = (int)batch_max_heap[q].size() - 1; r >= 0; r --) {
            const pair<float, uint>& top = batch_max_heap[q].top();
            results[q][r].first = top.second;
            results[q][r].second = top.first;
            batch_max_heap[q].pop();
        }
    }
    // FIX: release resources (everything here was leaked).
    for (int q = 0; q < queries.size(); q ++) delete[] batch_vecs_stack[q];
    delete[] batch_vecs_stack;
    delete[] batch_stacks;
    delete[] batch_qdist;
    close(fd_codes);
    free(buffer);
}
void query_processing_opt_batch_scan_compressed_codes_opt_o_direct(
const string &dataset_path,
const vector<vector<float>> &queries, int top_k, int M, int K,
int m_Ds, uint num_codes,
const vector<PQ::Array> &m_codewords,
vector<vector<pair<int, float>>> &results, uchar** decoder)
{
string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
+ "_Approx_compressed_codes_opt";
file_name = file_name + "_N" + to_string(num_codes);
int fd_codes;
uchar* buffer = (uchar*)aligned_alloc(BLKNBS, BLKNBS);
cout << file_name << endl;
fd_codes = open(file_name.c_str(), O_DIRECT|O_RDONLY);
if (fd_codes < 0) {
cout << "cannot open file " << file_name << endl;
}
int byte_read = read(fd_codes, buffer, BLKNBS);
long long n_codes = ((long long*)buffer)[0];
long long n_bytes = ((long long*)buffer)[1];
if (num_codes == -1) num_codes = n_codes;
else if (num_codes != n_codes) {
cout << "scan only part of the codes " << num_codes << " / "
<< n_codes << endl;
}
cout << "n_codes = " << n_codes << endl;
cout << "n_bytes = " << n_bytes << endl;
// cout << BLKNBITS<< endl;
// calculate distance lookup table, batch mode
int nq = queries.size();
vector<vector<float>> m_sub_distances_batch(PQ_M);
for (int m = 0; m < M; m ++) {
m_sub_distances_batch[m].resize( K * nq );
for (int k = 0; k < K; k ++) {
for (int q = 0; q < nq; q ++) {
m_sub_distances_batch[m][k*nq+q] = 0;
for (int d = 0; d < m_Ds; d ++) {
m_sub_distances_batch[m][k*nq+q] +=
pow(m_codewords[m][k][d] - queries[q][m*m_Ds+d], 2);
}
}
}
}
// < <lowerbound, upperbound>, <distance, node_position> >
vector<priority_queue<pair<float, uint>,
vector<pair<float, uint>>,
GreaterByDist>> batch_max_heap(queries.size());
long long byte_offset=16;
n_bytes += byte_offset;
double * batch_qdist = new double[queries.size()];
memset(batch_qdist, 0, sizeof(double)*queries.size());
uchar* batch_stacks = new uchar[queries.size()*M*M];
uchar*** batch_vecs_stack = new uchar**[queries.size()*M];
for (int m = 0; m < M; m ++) {
batch_vecs_stack[m] = new uchar*[M];
for (int mm = 0; mm < M; mm ++) {
batch_vecs_stack[m][mm] = batch_stacks + m*M*nq + mm*nq;
}
}
// vector<vector<uchar>> vecs_stack(M, vector<uchar>(M, 0));
vector<vector<double>> batch_dists_stack(M);
for (int m = 0; m < M; m ++) {
batch_dists_stack[m].resize(nq);
}
for (int m = 0; m < M; m ++) {
uchar cid = get_byte_from_compressed_codes(buffer, byte_offset,
fd_codes, n_bytes);
for (int q = 0; q < queries.size(); q ++) {
batch_qdist[q] += m_sub_distances_batch[m][cid*nq+q];
batch_vecs_stack[0][m][q] = cid;
}
}
for (int q = 0; q < queries.size(); q ++) {
batch_dists_stack[0][q] = batch_qdist[q];
batch_max_heap[q].push(make_pair(batch_qdist[q], 0));
}
long i = 1;
vector<float> batch_dist(queries.size());
for (; i+1 < num_codes; i = i + 2) {
// cout << "============== i = " << i << " ===============" << endl;
// get two depths
int depths = get_byte_from_compressed_codes(buffer,
byte_offset, fd_codes, n_bytes);
// cout << bitset<8>(depths) << " " << depths << endl;
// -------------- PROCESS the first code in this pair
int depth = depths & 7;
// copy vector from parent
// #pragma simd
memcpy(batch_vecs_stack[depth][0], batch_vecs_stack[depth-1][0], sizeof(uchar)*nq*M);
#pragma simd
for (int q = 0; q < queries.size(); q ++) {
batch_dist[q] = batch_dists_stack[depth-1][q];
}
uchar bitmap = get_byte_from_compressed_codes(buffer,
byte_offset, fd_codes, n_bytes);
int n_diff = decoder[bitmap][0];
// calculdate distance to query
// cout << "DEPTH " << depth << " n_diff = " << n_diff << " " << endl;
for (int j = 0; j < n_diff; j ++) {
int m = decoder[bitmap][j+1];
uchar cid = get_byte_from_compressed_codes(buffer,
byte_offset, fd_codes, n_bytes);
#pragma simd
for (int q = 0; q < queries.size();q++) {
batch_vecs_stack[depth][m][q] = cid;
uchar from = batch_vecs_stack[depth-1][m][q];
batch_dist[q] -= m_sub_distances_batch[m][from*nq+q];
batch_dist[q] += m_sub_distances_batch[m][cid*nq+q];
}
// cout << "(" << (int)from << "," << (int)cid << ") ";
}
for (int q = 0; q < queries.size(); q ++) {
batch_dists_stack[depth][q] = batch_dist[q];
}
// cout << endl;
// cout << dist << " bit_offset " << bit_offset << endl;
for (int q = 0; q < queries.size(); q ++) {
if (batch_max_heap[q].size() < top_k) {
batch_max_heap[q].emplace(batch_dist[q], i);
} else if (batch_dist[q] < batch_max_heap[q].top().first) {
batch_max_heap[q].pop();
batch_max_heap[q].emplace(batch_dist[q], i);
}
}
// -------------- PROCESS the second code in this pair
depth = (depths>>4) & 7;
// copy vector from parent
// #pragma simd
// for (int m = 0; m < M; m ++)
// vecs_stack[depth][m] = vecs_stack[depth-1][m];
memcpy(batch_vecs_stack[depth][0], batch_vecs_stack[depth-1][0], sizeof(uchar)*nq*M);
#pragma simd
for (int q = 0; q < queries.size(); q ++) {
batch_dist[q] = batch_dists_stack[depth-1][q];
}
bitmap = get_byte_from_compressed_codes(buffer,
byte_offset, fd_codes, n_bytes);
n_diff = decoder[bitmap][0];
// calculdate distance to query
// cout << "DEPTH " << depth << " n_diff = " << n_diff << " " << endl;
for (int j = 0; j < n_diff; j ++) {
int m = decoder[bitmap][j+1];
uchar cid = get_byte_from_compressed_codes(buffer,
byte_offset, fd_codes, n_bytes);
#pragma simd
for (int q = 0; q < queries.size();q++) {
batch_vecs_stack[depth][m][q] = cid;
uchar from = batch_vecs_stack[depth-1][m][q];
batch_dist[q] -= m_sub_distances_batch[m][from*nq+q];
batch_dist[q] += m_sub_distances_batch[m][cid*nq+q];
}
// cout << "(" << (int)from << "," << (int)cid << ") ";
}
for (int q = 0; q < queries.size(); q ++) {
batch_dists_stack[depth][q] = batch_dist[q];
}
// cout << endl;
// cout << dist << " bit_offset " << bit_offset << endl;
for (int q = 0; q < queries.size(); q ++) {
if (batch_max_heap[q].size() < top_k) {
batch_max_heap[q].emplace(batch_dist[q], i);
} else if (batch_dist[q] < batch_max_heap[q].top().first) {
batch_max_heap[q].pop();
batch_max_heap[q].emplace(batch_dist[q], i);
}
}
}
if (i == num_codes - 1) {
// process one more code
int depth = get_byte_from_compressed_codes(buffer, byte_offset,
fd_codes, n_bytes);
memcpy(batch_vecs_stack[depth][0], batch_vecs_stack[depth-1][0], sizeof(uchar)*nq*M);
for (int q = 0; q < queries.size(); q ++) {
batch_dist[q] = batch_dists_stack[depth-1][q];
}
uchar bitmap = get_byte_from_compressed_codes(buffer,
byte_offset, fd_codes, n_bytes);
int n_diff = decoder[bitmap][0];
// calculdate distance to query
// cout << "DEPTH " << depth << " n_diff = " << n_diff << " " << endl;
for (int j = 0; j < n_diff; j ++) {
int m = decoder[bitmap][j+1];
uchar cid = get_byte_from_compressed_codes(buffer,
byte_offset, fd_codes, n_bytes);
for (int q = 0; q < queries.size();q++) {
batch_vecs_stack[depth][m][q] = cid;
uchar from = batch_vecs_stack[depth-1][m][q];
batch_dist[q] -= m_sub_distances_batch[m][from*nq+q];
batch_dist[q] += m_sub_distances_batch[m][cid*nq+q];
}
// cout << "(" << (int)from << "," << (int)cid << ") ";
}
for (int q = 0; q < queries.size(); q ++) {
batch_dists_stack[depth][q] = batch_dist[q];
}
// cout << endl;
// cout << dist << " bit_offset " << bit_offset << endl;
for (int q = 0; q < queries.size(); q ++) {
if (batch_max_heap[q].size() < top_k) {
batch_max_heap[q].emplace(batch_dist[q], i);
} else if (batch_dist[q] < batch_max_heap[q].top().first) {
batch_max_heap[q].pop();
batch_max_heap[q].emplace(batch_dist[q], i);
}
}
}
for (int q = 0; q < queries.size(); q ++) {
for (int i = top_k-1; i >= 0; i --) {
const pair<float, uint>& top = batch_max_heap[q].top();
results[q][i].first = top.second;
results[q][i].second= top.first;
batch_max_heap[q].pop();
}
}
// cout << "byte offset after scan is " << byte_offset << endl;
}
//
// Placeholder for skipping n_bytes_to_skip bytes of the compressed code
// stream (e.g. by seeking fd_codes forward).  Currently an empty stub:
// both parameters are unused and calling it has no effect.
// NOTE(review): confirm whether this was intentionally left unimplemented --
// any caller relying on it to advance the file position will silently read
// the wrong bytes.
inline void skip_bytes_from_compressed_codes(int &fd_codes,
long long n_bytes_to_skip) {
}
/*
 * Row-store variant of the single-query compressed-code scan: the file
 * interleaves each pair of compressed codes with the two corresponding raw
 * vectors, which are skipped byte-by-byte after every pair.
 *
 * NOTE(review): `ext` (dataset extension, "fvecs" or bvecs) and `dim`
 * (vector dimensionality) are file-level globals defined elsewhere --
 * fvecs entries are 4*dim bytes, bvecs entries dim bytes.
 *
 * results[i] = (code index, distance), nearest first.
 */
void row_store_query_processing_scan_compressed_codes_opt_o_direct(
    const string &dataset_path,
    const vector<float> &query, int top_k, int M, int K,
    int m_Ds, uint num_codes,
    const vector<PQ::Array> &m_codewords,
    vector<pair<int, float>> &results, uchar** decoder)
{
    string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
        + "_Approx_compressed_codes_opt";
    file_name = file_name + "_N" + to_string(num_codes);
    file_name = file_name + "_row_store";
    uchar* buffer = (uchar*)aligned_alloc(BLKNBS, BLKNBS);
    cout << file_name << endl;
    int fd_codes = open(file_name.c_str(), O_DIRECT|O_RDONLY);
    if (fd_codes < 0) {
        cout << "cannot open file " << file_name << endl;
        free(buffer);
        return;  // FIX: previously fell through with an invalid fd
    }
    int byte_read = read(fd_codes, buffer, BLKNBS);
    if (byte_read < 0) {
        cout << "cannot read header: " << strerror(errno) << endl;
        close(fd_codes);
        free(buffer);
        return;
    }
    long long n_codes = ((long long*)buffer)[0];
    long long n_bytes = ((long long*)buffer)[1];
    if (num_codes == (uint)-1) num_codes = n_codes;  // -1 sentinel: all codes
    else if (num_codes != n_codes) {
        cout << "scan only part of the codes " << num_codes << " / "
             << n_codes << endl;
    }
    cout << "n_codes = " << n_codes << endl;
    cout << "n_bytes = " << n_bytes << endl;
    // (Re)build the per-subspace distance lookup table for this query.
    if (m_sub_distances == NULL) {
        m_sub_distances = new float*[PQ_M]; // m_sub_distances defined in .h file
        for (int i = 0; i < PQ_M; i++) {
            m_sub_distances[i] = new float[PQ_K];
            memset(m_sub_distances[i], 0, sizeof(float)*PQ_K);
        }
    }
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < K; j ++) {
            m_sub_distances[i][j] = .0;
            for (int k = 0; k < m_Ds; k ++) {
                m_sub_distances[i][j] += pow(m_codewords[i][j][k]
                        - query[i*m_Ds+k], 2);
            }
        }
    }
    // Max-heap keyed on distance: top() is the worst of the current top_k.
    priority_queue<pair<float, uint>,
        vector<pair<float, uint>>,
        decltype(cmp_max)> max_heap(cmp_max);
    long long byte_offset = 16;  // skip 16-byte header
    n_bytes += byte_offset;
    double qdist = 0;
    uchar* stacks = new uchar[M*M];
    uchar** vecs_stack = new uchar*[M];
    for (int i = 0; i < M; i ++) {
        vecs_stack[i] = stacks+i*M;
    }
    vector<double> dists_stack(M, 0);
    // Code 0 is stored verbatim.
    for (int m = 0; m < M; m ++) {
        uchar cid = get_byte_from_compressed_codes(buffer, byte_offset,
                fd_codes, n_bytes);
        qdist += m_sub_distances[m][cid];
        vecs_stack[0][m] = cid;
    }
    dists_stack[0] = qdist;
    max_heap.push(make_pair(qdist, 0));
    long i = 1;
    for (; i+1 < num_codes; i = i + 2) {
        // One byte packs the stack depths of the next two codes.
        int depths = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        // -------------- PROCESS the first code in this pair (position i)
        int depth = depths & 7;
        // Copy parent vector in one 8-byte move (assumes M <= 8).
        ((long long*)vecs_stack[depth])[0] = ((long long*)vecs_stack[depth-1])[0];
        double dist = dists_stack[depth-1];
        uchar bitmap = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        // Patch only the subspaces that differ from the parent.
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fd_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        dists_stack[depth] = dist;
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i);
        }
        // -------------- PROCESS the second code in this pair (position i+1)
        depth = (depths>>4) & 7;
        ((long long*)vecs_stack[depth])[0] = ((long long*)vecs_stack[depth-1])[0];
        dist = dists_stack[depth-1];
        bitmap = get_byte_from_compressed_codes(buffer,
                byte_offset, fd_codes, n_bytes);
        n_diff = decoder[bitmap][0];
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fd_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        dists_stack[depth] = dist;
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i+1);
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i+1);
        }
        // Skip the two interleaved raw data points (row store layout).
        if (ext == "fvecs") {
            for (int j = 0; j < dim*4*2; j ++) {   // 4 bytes per float, 2 vectors
                get_byte_from_compressed_codes(buffer, byte_offset,
                        fd_codes, n_bytes);
            }
        } else { // bvecs: 1 byte per component
            for (int j = 0; j < dim*2; j ++) {
                get_byte_from_compressed_codes(buffer, byte_offset,
                        fd_codes, n_bytes);
            }
        }
    }
    if (i == num_codes - 1) {
        // Odd tail: exactly one code (position i) remains.
        int depth = get_byte_from_compressed_codes(buffer, byte_offset,
                fd_codes, n_bytes);
#pragma simd
        for (int m = 0; m < M; m ++)
            vecs_stack[depth][m] = vecs_stack[depth-1][m];
        double dist = dists_stack[depth-1];
        uchar bitmap = get_byte_from_compressed_codes(buffer, byte_offset,
                fd_codes, n_bytes);
        int n_diff = decoder[bitmap][0];
        for (int j = 0; j < n_diff; j ++) {
            int m = decoder[bitmap][j+1];
            uchar cid = get_byte_from_compressed_codes(buffer,
                    byte_offset, fd_codes, n_bytes);
            vecs_stack[depth][m] = cid;
            uchar from = vecs_stack[depth-1][m];
            dist -= m_sub_distances[m][from];
            dist += m_sub_distances[m][cid];
        }
        if (max_heap.size() < (size_t)top_k) {
            max_heap.emplace(dist, i);  // FIX: was i+1 (== num_codes, out of range)
        } else if (dist < max_heap.top().first) {
            max_heap.pop();
            max_heap.emplace(dist, i);  // FIX: was i+1
        }
    }
    // Emit nearest-first; guard against fewer than top_k scanned codes.
    for (int r = (int)max_heap.size() - 1; r >= 0; r --) {
        const pair<float, uint>& top = max_heap.top();
        results[r].first = top.second;
        results[r].second = top.first;
        max_heap.pop();
    }
    // FIX: release resources (fd, aligned buffer, stacks were leaked).
    delete[] vecs_stack;
    delete[] stacks;
    close(fd_codes);
    free(buffer);
}
void update_query_processing_simulation(
const string &dataset_path,
int M, int K,
int m_Ds, uint num_codes, uint num_updates,
const vector<PQ::Array> &m_codewords,
uchar** decoder)
{
string file_name = dataset_path + "/M" + to_string(M) + "K" + to_string(K)
+ "_Approx_compressed_codes_opt";
file_name = file_name + "_N" + to_string(num_codes);
int fd_codes;
uchar* buffer = (uchar*)aligned_alloc(BLKNBS, BLKNBS);
cout << file_name << endl;
fd_codes = open(file_name.c_str(), O_DIRECT|O_RDONLY);
if (fd_codes < 0) {
cout << "cannot open file " << file_name << endl;
}
int byte_read = read(fd_codes, buffer, BLKNBS);
long long n_codes = ((long long*)buffer)[0];
long long n_bytes = ((long long*)buffer)[1];
if (num_codes == -1) num_codes = n_codes;
else if (num_codes != n_codes) {
cout << "scan only part of the codes " << num_codes << " / "
<< n_codes << endl;
}
cout << "n_codes = " << n_codes << endl;
cout << "n_bytes = " << n_bytes << endl;
// cout << BLKNBITS<< endl;
long long byte_offset=16;
n_bytes += byte_offset;
uchar* stacks = new uchar[M*M];
uchar** vecs_stack = new uchar*[M];
for (int i = 0; i < M; i ++) {
vecs_stack[i] = stacks+i*M;
}
for (int m = 0; m < M; m ++) {
uchar cid = get_byte_from_compressed_codes(buffer, byte_offset,
fd_codes, n_bytes);
vecs_stack[0][m] = cid;
}
// update
long long total_inc_diffs = 0;
double total_inc_size = 0;
n_bytes -= num_codes/4;
for (int i = 0; i < num_updates; i ++) {
int inc_diffs = 0;
vector<uchar> new_vec(8);
uchar bitmap = 0;
for (int m = 0; m < M; m ++) {
uchar cid = rand() % K;
if (cid != vecs_stack[0][m]) {
bitmap = bitmap | (1<<m);
inc_diffs ++;
}
new_vec[0] = cid;
}
ofstream ofs(dataset_path + "/tmp", ios::binary);
ofs.write(reinterpret_cast<char*> (&bitmap), sizeof(uchar));
for (int m = 0; m < M; m ++) {
uchar cid = new_vec[0];
if (cid != vecs_stack[0][m])
ofs.write(reinterpret_cast<char*> (&cid), sizeof(uchar));
}
ofs.close();
total_inc_diffs += inc_diffs;
total_inc_size += 1; // bitmap
total_inc_size += inc_diffs; // diffs
total_inc_size += 0.25; // two marking bits
if (i % (num_updates/100) == 0) {
cout << i << " " << total_inc_diffs << " " << total_inc_size / n_bytes << endl;
}
}
}
//===================== In Memory Query ===================
// In-memory counterpart of get_byte_from_compressed_codes(): returns the
// byte at byte_offset within buffer and advances the cursor by one.
inline int get_byte_from_compressed_codes_in_memory(uchar* buffer,
long long& byte_offset) {
    int value = buffer[byte_offset];
    byte_offset += 1;
    return value;
}
// Scan a delta-compressed code stream held in memory and return the top_k
// codes closest to `query` (squared L2 over PQ sub-distances).
// Codes after the first are stored as (depth, bitmap, differing sub-codes)
// relative to a stack of previously decoded vectors; pairs of codes share a
// single depth byte (low nibble = first, high nibble = second).
// Fixes vs. previous version: the scratch stacks are now freed (they were
// leaked), size comparisons no longer mix signed/unsigned, and the result
// drain no longer pops an empty priority_queue (undefined behavior) when
// fewer than top_k codes were scanned.
void query_processing_scan_compressed_codes_opt_in_memory(
    uchar* codes, long long n_bytes,
    const vector<float> &query, int top_k, int M, int K,
    int m_Ds, uint num_codes,
    const vector<PQ::Array> &m_codewords,
    vector<pair<int, float>> &results, uchar** decoder)
{
  long long n_codes = num_codes;
  cout << "n_codes = " << n_codes << endl;
  cout << "n_bytes = " << n_bytes << endl;
  // Lazily allocate the global distance lookup table (declared in the header).
  if (m_sub_distances == NULL) {
    m_sub_distances = new float*[PQ_M]; // m_sub_distances defined in .h file
    for (int i = 0; i < PQ_M; i++) {
      m_sub_distances[i] = new float[PQ_K];
      memset(m_sub_distances[i], 0, sizeof(float)*PQ_K);
    }
  }
  // Squared L2 distance from each query sub-vector to every codeword.
  for (int i = 0; i < M; i++) {
    for (int j = 0; j < K; j ++) {
      m_sub_distances[i][j] = .0;
      for (int k = 0; k < m_Ds; k ++) {
        m_sub_distances[i][j] += pow(m_codewords[i][j][k]
                                     - query[i*m_Ds+k], 2);
      }
    }
  }
  // Max-heap of (distance, code position); top is the worst of the current top-k.
  priority_queue<pair<float, uint>,
                 vector<pair<float, uint>>,
                 decltype(cmp_max)> max_heap(cmp_max);
  long long byte_offset = 0;
  n_bytes += byte_offset;
  double qdist = 0;
  // Per-depth stacks of decoded vectors and their distances.
  // NOTE(review): the 8-byte block copies below assume M <= 8 — confirm.
  uchar* stacks = new uchar[M*M];
  uchar** vecs_stack = new uchar*[M];
  for (int i = 0; i < M; i ++) {
    vecs_stack[i] = stacks+i*M;
  }
  vector<double> dists_stack(M,0);
  // The first code is stored uncompressed.
  for (int m = 0; m < M; m ++) {
    uchar cid = get_byte_from_compressed_codes_in_memory(codes, byte_offset);
    qdist += m_sub_distances[m][cid];
    vecs_stack[0][m] = cid;
    cout << (int)cid << " ";
  }
  cout << endl;
  dists_stack[0] = qdist;
  max_heap.push(make_pair(qdist, 0));
  long i = 1;
  for (; i+1 < num_codes; i = i + 2) {
    // One shared byte carries the depths of both codes in this pair.
    int depths = get_byte_from_compressed_codes_in_memory(codes,
                                                          byte_offset);
    // -------------- PROCESS the first code in this pair
    int depth = depths & 7;
    // copy vector from parent (single 8-byte move; assumes M <= 8)
    ((long long*)vecs_stack[depth])[0] = ((long long*)vecs_stack[depth-1])[0];
    double dist = dists_stack[depth-1];
    uchar bitmap = get_byte_from_compressed_codes_in_memory(codes,
                                                            byte_offset);
    int n_diff = decoder[bitmap][0];
    // Apply only the differing sub-codes, updating the distance incrementally.
    for (int j = 0; j < n_diff; j ++) {
      int m = decoder[bitmap][j+1];
      uchar cid = get_byte_from_compressed_codes_in_memory(codes,
                                                           byte_offset);
      vecs_stack[depth][m] = cid;
      uchar from = vecs_stack[depth-1][m];
      dist -= m_sub_distances[m][from];
      dist += m_sub_distances[m][cid];
    }
    dists_stack[depth] = dist;
    // cast avoids a signed/unsigned comparison
    if (max_heap.size() < static_cast<size_t>(top_k)) {
      max_heap.emplace(dist, i);
    } else if (dist < max_heap.top().first) {
      max_heap.pop();
      max_heap.emplace(dist, i);
    }
    // -------------- PROCESS the second code in this pair
    depth = (depths>>4) & 7;
    // copy vector from parent (single 8-byte move; assumes M <= 8)
    ((long long*)vecs_stack[depth])[0] = ((long long*)vecs_stack[depth-1])[0];
    dist = dists_stack[depth-1];
    bitmap = get_byte_from_compressed_codes_in_memory(codes,
                                                      byte_offset);
    n_diff = decoder[bitmap][0];
    for (int j = 0; j < n_diff; j ++) {
      int m = decoder[bitmap][j+1];
      uchar cid = get_byte_from_compressed_codes_in_memory(codes,
                                                           byte_offset);
      vecs_stack[depth][m] = cid;
      uchar from = vecs_stack[depth-1][m];
      dist -= m_sub_distances[m][from];
      dist += m_sub_distances[m][cid];
    }
    dists_stack[depth] = dist;
    if (max_heap.size() < static_cast<size_t>(top_k)) {
      max_heap.emplace(dist, i+1);
    } else if (dist < max_heap.top().first) {
      max_heap.pop();
      max_heap.emplace(dist, i+1);
    }
  }
  if (i == num_codes - 1) {
    // Odd total: process the final unpaired code (depth stored as a full byte).
    int depth = get_byte_from_compressed_codes_in_memory(codes, byte_offset);
    #pragma simd
    for (int m = 0; m < M; m ++)
      vecs_stack[depth][m] = vecs_stack[depth-1][m];
    double dist = dists_stack[depth-1];
    uchar bitmap = get_byte_from_compressed_codes_in_memory(codes, byte_offset);
    int n_diff = decoder[bitmap][0];
    for (int j = 0; j < n_diff; j ++) {
      int m = decoder[bitmap][j+1];
      uchar cid = get_byte_from_compressed_codes_in_memory(codes,
                                                           byte_offset);
      vecs_stack[depth][m] = cid;
      uchar from = vecs_stack[depth-1][m];
      dist -= m_sub_distances[m][from];
      dist += m_sub_distances[m][cid];
    }
    if (max_heap.size() < static_cast<size_t>(top_k)) {
      max_heap.emplace(dist, i+1);
    } else if (dist < max_heap.top().first) {
      max_heap.pop();
      max_heap.emplace(dist, i+1);
    }
  }
  // Drain the heap back-to-front so results[0] is the closest code.
  // Popping an empty priority_queue is UB, so only drain what is present.
  int n_found = static_cast<int>(max_heap.size());
  for (int i = n_found - 1; i >= 0; i --) {
    const pair<float, uint>& top = max_heap.top();
    results[i].first = top.second;
    results[i].second= top.first;
    max_heap.pop();
  }
  // Release the scratch stacks (previously leaked).
  delete[] stacks;
  delete[] vecs_stack;
}
|
bicg.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "bicg.h"
/* Array initialization: p and r get multiples of pi, A a deterministic
   pattern scaled by nx. */
static
void init_array (int nx, int ny,
		 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
		 DATA_TYPE POLYBENCH_1D(r,NX,nx),
		 DATA_TYPE POLYBENCH_1D(p,NY,ny))
{
  int row, col;

  for (col = 0; col < ny; col++)
    p[col] = col * M_PI;

  for (row = 0; row < nx; row++) {
    r[row] = row * M_PI;
    for (col = 0; col < ny; col++)
      A[row][col] = ((DATA_TYPE) row*(col+1))/nx;
  }
}
/* DCE code. Must scan the entire live-out data (s and q) so the compiler
   cannot eliminate the kernel; also usable as a correctness check. */
static
void print_array(int nx, int ny,
		 DATA_TYPE POLYBENCH_1D(s,NY,ny),
		 DATA_TYPE POLYBENCH_1D(q,NX,nx))
{
  int idx;

  for (idx = 0; idx < ny; idx++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, s[idx]);
    if (idx % 20 == 0)
      fprintf (stderr, "\n");
  }
  for (idx = 0; idx < nx; idx++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, q[idx]);
    if (idx % 20 == 0)
      fprintf (stderr, "\n");
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Computes s = A^T * r and q = A * p.
   Fix: the original fused loop parallelized over i while every iteration
   updated the shared s[j] entries, a data race under OpenMP. The two
   products are now computed in separate worksharing loops, each
   parallelized over the index it owns exclusively. */
static
void kernel_bicg(int nx, int ny,
		 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
		 DATA_TYPE POLYBENCH_1D(s,NY,ny),
		 DATA_TYPE POLYBENCH_1D(q,NX,nx),
		 DATA_TYPE POLYBENCH_1D(p,NY,ny),
		 DATA_TYPE POLYBENCH_1D(r,NX,nx))
{
  int i, j;

#pragma scop
#pragma omp parallel
  {
    /* q = A * p : each iteration writes only its own q[i]. */
#pragma omp for private (j)
    for (i = 0; i < _PB_NX; i++)
      {
	q[i] = 0;
	for (j = 0; j < _PB_NY; j++)
	  q[i] = q[i] + A[i][j] * p[j];
      }
    /* s = A^T * r : parallelize over j so each thread owns s[j]. */
#pragma omp for private (i)
    for (j = 0; j < _PB_NY; j++)
      {
	s[j] = 0;
	for (i = 0; i < _PB_NX; i++)
	  s[j] = s[j] + r[i] * A[i][j];
      }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time constants from bicg.h). */
  int nx = NX;
  int ny = NY;

  /* Variable declaration/allocation.
     A is NX x NY; s and p have NY entries; q and r have NX entries. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
  POLYBENCH_1D_ARRAY_DECL(s, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(q, DATA_TYPE, NX, nx);
  POLYBENCH_1D_ARRAY_DECL(p, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(r, DATA_TYPE, NX, nx);

  /* Initialize array(s). */
  init_array (nx, ny,
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(r),
	      POLYBENCH_ARRAY(p));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel (computes s = A^T*r and q = A*p). */
  kernel_bicg (nx, ny,
	       POLYBENCH_ARRAY(A),
	       POLYBENCH_ARRAY(s),
	       POLYBENCH_ARRAY(q),
	       POLYBENCH_ARRAY(p),
	       POLYBENCH_ARRAY(r));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(nx, ny, POLYBENCH_ARRAY(s), POLYBENCH_ARRAY(q)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(s);
  POLYBENCH_FREE_ARRAY(q);
  POLYBENCH_FREE_ARRAY(p);
  POLYBENCH_FREE_ARRAY(r);

  return 0;
}
|
residual_based_bdf_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_SCHEME )
#define KRATOS_RESIDUAL_BASED_BDF_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "includes/checks.h"
#include "utilities/time_discretization.h"
#include "solving_strategies/schemes/residual_based_implicit_time_scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBDFScheme
* @ingroup KratosCore
* @brief BDF integration scheme (for dynamic problems)
* @details The \f$ n \f$ order Backward Differentiation Formula (BDF) method is a two step \f$ n \f$ order accurate method.
* This scheme is designed to solve a system of the type:
*\f[
* \mathbf{M} \frac{d^2(u_{n0})}{dt^2} + \mathbf{D} \frac{d(un0)}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext}
* \f]
*
* If we call:
*
* - Second derivative:
* -# \f$ \ddot{u}_{ni} \f$ the second derivative at the step i
* - First derivative:
* -# \f$ \dot{u}_{ni} \f$ the first derivative at the step i
* - Third derivative:
* -# \f$ u_{ni} \f$ the variable at the step i
*
* Then we assume:
* \f[ \frac{d^2(u_{n0})}{dt^2} \|t_{n0} = \sum_i c_i \dot{u}_{ni} \f]
* \f[ \frac{d(u_{n0})}{dt} \|t_{n0} = \sum_i c_i u_{n0} \f]
* with for order 2 (BDF2):
* -# \f$ c_0 = \frac{1.5}{dt} \f$
* -# \f$ c_1 = \frac{-2.0}{dt} \f$
* -# \f$ c_2 = \frac{0.5}{dt} \f$
*
* The LHS and RHS can be defined as:
* \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \frac{d(\dot{u}_{n0})}{dt} - \mathbf{D} \frac{d(u_{n0})}{dt} - \mathbf{K} u_{n0} \f]
* and
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2 \mathbf{M} + c_0 \mathbf{D} + K \f]
* @note This implies that elements are expected to be written in terms
* of a variable with two time derivatives
* <a href="https://mediatum.ub.tum.de/doc/1223319/80942.pdf">Main reference</a>
* @todo Create a BibTeX file https://www.stack.nl/~dimitri/doxygen/manual/commands.html#cmdcite
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFScheme
    : public ResidualBasedImplicitTimeScheme<TSparseSpace, TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFScheme );

    /// Base scheme type
    typedef Scheme<TSparseSpace,TDenseSpace> BaseType;

    /// Shared pointer to the base scheme
    typedef typename BaseType::Pointer BaseTypePointer;

    /// Implicit time-scheme base class (provides the assembly machinery)
    typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;

    typedef typename ImplicitBaseType::TDataType TDataType;

    typedef typename ImplicitBaseType::DofsArrayType DofsArrayType;

    typedef typename Element::DofsVectorType DofsVectorType;

    typedef typename ImplicitBaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename ImplicitBaseType::TSystemVectorType TSystemVectorType;

    typedef typename ImplicitBaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename ImplicitBaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef ModelPart::NodesContainerType NodesArrayType;

    /// Definition of epsilon
    static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Constructor. The BDF method
     * @param Order The integration order
     * @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives
     */
    explicit ResidualBasedBDFScheme(const std::size_t Order = 2)
        :ImplicitBaseType(),
         mOrder(Order),
         mpBDFUtility(Kratos::make_unique<TimeDiscretization::BDF>(Order))
    {
        // Allocate auxiliary memory (one scratch vector per OpenMP thread)
        const std::size_t num_threads = OpenMPUtils::GetNumThreads();
        mVector.dotun0.resize(num_threads);
        mVector.dot2un0.resize(num_threads);

        // Doing a minimal check
        KRATOS_ERROR_IF(mOrder < 1) << "ERROR:: Not possible to compute a BDF of order less than 1" << std::endl;

        // We resize the BDF coefficients (an order-n BDF uses n+1 coefficients)
        if (mBDF.size() != (mOrder + 1))
            mBDF.resize(mOrder + 1);
    }

    /** Copy Constructor.
     */
    explicit ResidualBasedBDFScheme(ResidualBasedBDFScheme& rOther)
        :ImplicitBaseType(rOther)
        ,mOrder(rOther.mOrder)
        ,mBDF(rOther.mBDF)
        ,mVector(rOther.mVector)
        ,mpBDFUtility(nullptr)
    {
        // The BDF utility is owned exclusively: build a fresh one of the same order
        Kratos::unique_ptr<TimeDiscretization::BDF> auxiliar_pointer = Kratos::make_unique<TimeDiscretization::BDF>(mOrder);
        mpBDFUtility.swap(auxiliar_pointer);
    }

    /**
     * Clone (deep copy via the copy constructor above)
     */
    BaseTypePointer Clone() override
    {
        return BaseTypePointer( new ResidualBasedBDFScheme(*this) );
    }

    /** Destructor.
     */
    ~ResidualBasedBDFScheme
    () override {}

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Performing the update of the solution
     * @details Incremental update within newton iteration. It updates the state variables at the end of the time step
     * \f[ u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u\f]
     * @param rModelPart The model of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param rA LHS matrix
     * @param rDx incremental update of primary variables
     * @param rb RHS Vector
     */
    void Update(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        // Update of displacement (by DOF), then recompute the time derivatives
        mpDofUpdater->UpdateDofs(rDofSet, rDx);

        UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb);

        KRATOS_CATCH( "" );
    }

    /**
     * @brief Performing the prediction of the solution
     * @details It predicts the solution for the current step x = xold + vold * Dt
     * @param rModelPart The model of the problem to solve
     * @param rDofSet set of all primary variables
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     * @note Pure-virtual in spirit: derived schemes must override this.
     */
    void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        KRATOS_ERROR << "Calling base BDF class" << std::endl;

        KRATOS_CATCH( "" );
    }

    /**
     * @brief It initializes time step solution. Only for reasons if the time step solution is restarted
     * @param rModelPart The model of the problem to solve
     * @param rA LHS matrix
     * @param rDx Incremental update of primary variables
     * @param rb RHS Vector
     * @todo I cannot find the formula for the higher orders with variable time step. I tried to deduce by myself but the result was very unstable
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY;

        ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        ImplicitBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

        // Recompute the BDF coefficients for the current (possibly new) time step
        // and cache them locally from the process info
        mpBDFUtility->ComputeAndSaveBDFCoefficients(r_current_process_info);
        mBDF = r_current_process_info[BDF_COEFFICIENTS];

        KRATOS_WARNING_IF("ResidualBasedBDFScheme", mOrder > 2)
            << "For higher orders than 2 the time step is assumed to be constant.\n";

        KRATOS_CATCH( "" );
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed on the input provided.
     * @details Checks can be "expensive" as the function is designed to catch user's errors.
     * @param rModelPart The model of the problem to solve
     * @return Zero means all ok
     */
    int Check(const ModelPart& rModelPart) const override
    {
        KRATOS_TRY;

        const int err = ImplicitBaseType::Check(rModelPart);
        if(err!=0) return err;

        // Check for minimum value of the buffer index
        // Verify buffer size (an order-n BDF needs n+1 stored steps)
        KRATOS_ERROR_IF(rModelPart.GetBufferSize() < mOrder + 1) << "Insufficient buffer size. Buffer size should be greater than " << mOrder + 1 << ". Current size is " << rModelPart.GetBufferSize() << std::endl;

        KRATOS_CATCH( "" );

        return 0;
    }

    /// Free memory allocated by this class.
    void Clear() override
    {
        this->mpDofUpdater->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /**
     * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
     * @return The default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"(
        {
            "name" : "base_bdf_scheme",
            "integration_order" : 2
        })");

        // Getting base class default parameters
        const Parameters base_default_parameters = ImplicitBaseType::GetDefaultParameters();
        default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
        return default_parameters;
    }

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedBDFScheme";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

protected:

    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    // Per-thread scratch vectors used when assembling dynamic RHS terms
    struct GeneralVectors
    {
        std::vector< Vector > dotun0;  /// First derivative
        std::vector< Vector > dot2un0; /// Second derivative
    };

    const std::size_t mOrder; /// The integration order

    Vector mBDF; /// The BDF coefficients

    GeneralVectors mVector; /// The structure containing the derivatives

    Kratos::unique_ptr<TimeDiscretization::BDF> mpBDFUtility; /// Utility to compute BDF coefficients

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief Performing the update of the derivatives
     * @param rModelPart The model of the problem to solve
     * @param rDofSet Set of all primary variables
     * @param rA LHS matrix
     * @param rDx incremental update of primary variables
     * @param rb RHS Vector
     */
    inline void UpdateDerivatives(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        )
    {
        // Updating time derivatives (nodally for efficiency)
        const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );

        // Getting first node iterator
        const auto it_node_begin = rModelPart.Nodes().begin();

        #pragma omp parallel for
        for(int i = 0; i< num_nodes; ++i) {
            auto it_node = it_node_begin + i;
            // Derived classes implement the actual BDF update formulas
            UpdateFirstDerivative(it_node);
            UpdateSecondDerivative(it_node);
        }
    }

    /**
     * @brief Updating first time derivative (velocity)
     * @param itNode the node interator
     * @note Must be overridden; the base class has no variable to derive.
     */
    virtual inline void UpdateFirstDerivative(NodesArrayType::iterator itNode)
    {
        KRATOS_ERROR << "Calling base BDF class" << std::endl;
    }

    /**
     * @brief Updating second time derivative (acceleration)
     * @param itNode the node interator
     * @note Must be overridden; the base class has no variable to derive.
     */
    virtual inline void UpdateSecondDerivative(NodesArrayType::iterator itNode)
    {
        KRATOS_ERROR << "Calling base BDF class" << std::endl;
    }

    /**
     * @brief It adds the dynamic LHS contribution of the elements
     * \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2\mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f]
     * @param rLHS_Contribution The dynamic contribution for the LHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToLHS(
        LocalSystemMatrixType& rLHS_Contribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        // Adding mass contribution to the dynamic stiffness
        if (rM.size1() != 0) { // if M matrix declared
            noalias(rLHS_Contribution) += rM * std::pow(mBDF[0], 2);
        }

        // Adding damping contribution
        if (rD.size1() != 0) { // if D matrix declared
            noalias(rLHS_Contribution) += rD * mBDF[0];
        }
    }

    /**
     * @brief It adds the dynamic RHS contribution of the objects
     * \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
     * @param rObject The object to compute
     * @param rRHS_Contribution The dynamic contribution for the RHS
     * @param rD The damping matrix
     * @param rM The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     * @note Uses the per-thread scratch vectors in mVector, so it is safe
     * to call from parallel assembly loops.
     */
    template <class TObjectType>
    void TemplateAddDynamicsToRHS(
        TObjectType& rObject,
        LocalSystemVectorType& rRHS_Contribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        const ProcessInfo& rCurrentProcessInfo
        )
    {
        const std::size_t this_thread = OpenMPUtils::ThisThread();

        // Adding inertia contribution
        if (rM.size1() != 0) {
            rObject.GetSecondDerivativesVector(mVector.dot2un0[this_thread], 0);
            noalias(rRHS_Contribution) -= prod(rM, mVector.dot2un0[this_thread]);
        }

        // Adding damping contribution
        if (rD.size1() != 0) {
            rObject.GetFirstDerivativesVector(mVector.dotun0[this_thread], 0);
            noalias(rRHS_Contribution) -= prod(rD, mVector.dotun0[this_thread]);
        }
    }

    /**
     * @brief It adds the dynamic RHS contribution of the elements
     * \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
     * @param rElement The element to compute
     * @param RHS_Contribution The dynamic contribution for the RHS
     * @param D The damping matrix
     * @param M The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToRHS(
        Element& rElement,
        LocalSystemVectorType& rRHS_Contribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        TemplateAddDynamicsToRHS<Element>(rElement, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
    }

    /**
     * @brief It adds the dynamic RHS contribution of the condition
     * \f[ RHS = f_{ext} - \ddot{u}_{n0} \mathbf{M} + \dot{u}_{n0} \mathbf{D} + u_{n0} \mathbf{K} \f]
     * @param rCondition The condition to compute
     * @param RHS_Contribution The dynamic contribution for the RHS
     * @param D The damping matrix
     * @param M The mass matrix
     * @param rCurrentProcessInfo The current process info instance
     */
    void AddDynamicsToRHS(
        Condition& rCondition,
        LocalSystemVectorType& rRHS_Contribution,
        LocalSystemMatrixType& rD,
        LocalSystemMatrixType& rM,
        const ProcessInfo& rCurrentProcessInfo
        ) override
    {
        TemplateAddDynamicsToRHS<Condition>(rCondition, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
    }

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:

    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    /// Utility class to perform the update after solving the system, will be different in MPI runs.
    typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Serialization
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}
}; /* Class ResidualBasedBDFScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_SCHEME defined */
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT. Y is used as scratch space and is
 * normalized in place (classic GNU libc idiom).
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
    {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * borrow;
      y->tv_sec += borrow;
    }

  /* Carry excess microseconds into seconds when the gap exceeds 1s. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * carry;
      y->tv_sec -= carry;
    }

  /* tv_usec is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
/* Driver: allocates two time copies of an Nz x Ny x Nx grid, runs the
 * tiled 7-point stencil TESTS times, and reports the best wall time.
 *
 * Fixes vs. previous version:
 *  - Nx/Ny/Nz/Nt were read uninitialized when fewer than 3 (resp. 4)
 *    command-line arguments were given (undefined behavior); they now
 *    have defaults that the arguments override.
 *  - The halo cells (index 0 / N-1) of A[0] and all of A[1] were read
 *    by the stencil while uninitialized; both buffers are now zeroed
 *    before the (unchanged) random interior initialization.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Default problem size (+2 for the halo); overridden from argv. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time copies of the grid, indexed A[t%2][z][y][x]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 8;
  tile_size[3] = 512;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  //
  // Zero both buffers first so halo cells hold defined values.
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 0.0;
        A[1][i][j][k] = 0.0;
      }
    }
  }
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
    /* Preprocessor residue (glibc feature-test header comments) removed. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,16);t1++) {
        lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
        ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-31,32)),ceild(32*t2-Nz-508,512)),ceild(8*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(16*t1+Nx+29,512)),floord(32*t2+Nx+28,512)),floord(8*t3+Nx+4,512)),floord(32*t1-32*t2+Nz+Nx+27,512));t4++) {
              for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),512*t4+510),32*t1-32*t2+Nz+29);t5++) {
                for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                    lbv=max(512*t4,t5+1);
                    ubv=min(512*t4+511,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);

    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
|
multires.c | /*
* $Id: multires.c 40693 2011-09-29 15:28:22Z mont29 $
*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2007 by Nicholas Bishop
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): none yet.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/multires.c
* \ingroup bke
*/
#include "MEM_guardedalloc.h"
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_object_types.h"
#include "DNA_scene_types.h"
#include "BLI_blenlib.h"
#include "BLI_math.h"
#include "BLI_pbvh.h"
#include "BLI_editVert.h"
#include "BLI_utildefines.h"
#include "BKE_cdderivedmesh.h"
#include "BKE_mesh.h"
#include "BKE_modifier.h"
#include "BKE_multires.h"
#include "BKE_paint.h"
#include "BKE_scene.h"
#include "BKE_subsurf.h"
#include "BKE_object.h"
#include "CCGSubSurf.h"
#include <math.h>
#include <string.h>
/* MULTIRES MODIFIER */
static const int multires_max_levels = 13;
static const int multires_grid_tot[] = {0, 4, 9, 25, 81, 289, 1089, 4225, 16641, 66049, 263169, 1050625, 4198401, 16785409};
static const int multires_side_tot[] = {0, 2, 3, 5, 9, 17, 33, 65, 129, 257, 513, 1025, 2049, 4097};
static void multires_mvert_to_ss(DerivedMesh *dm, MVert *mvert);
static void multiresModifier_disp_run(DerivedMesh *dm, Mesh *me, int invert, int add, DMGridData **oldGridData, int totlvl);
/* Apply the multires modifier on top of the deformed base mesh and return
 * the resulting derived mesh; an independent copy is returned when the
 * modifier was a no-op so the caller always owns the result. */
DerivedMesh *get_multires_dm(Scene *scene, MultiresModifierData *mmd, Object *ob)
{
	ModifierData *md = (ModifierData *)mmd;
	ModifierTypeInfo *mti = modifierType_getInfo(md->type);
	DerivedMesh *deform = mesh_get_derived_deform(scene, ob, CD_MASK_BAREMESH);
	DerivedMesh *result = mti->applyModifier(md, ob, deform, 0, 1);

	if (result == deform)
		result = CDDM_copy(deform);

	return result;
}
/* Walk backwards from lastmd and return the first enabled multires
 * modifier encountered, or NULL when there is none. */
MultiresModifierData *find_multires_modifier_before(Scene *scene, ModifierData *lastmd)
{
	ModifierData *md = lastmd;

	while(md) {
		if(md->type == eModifierType_Multires &&
		   modifier_isEnabled(scene, md, eModifierMode_Realtime))
		{
			return (MultiresModifierData*)md;
		}
		md = md->prev;
	}

	return NULL;
}
/* used for applying scale on mdisps layer and syncing subdivide levels when joining objects
   use_first - return first multires modifier if all multires'es are disabled
*/
MultiresModifierData *get_multires_modifier(Scene *scene, Object *ob, int use_first)
{
	MultiresModifierData *firstmmd = NULL;
	ModifierData *md;

	/* return the first *enabled* multires modifier, remembering the very
	   first multires one seen as a fallback */
	for(md = ob->modifiers.first; md; md = md->next) {
		if(md->type != eModifierType_Multires)
			continue;

		if(!firstmmd)
			firstmmd = (MultiresModifierData*)md;

		if(modifier_isEnabled(scene, md, eModifierMode_Realtime))
			return (MultiresModifierData*)md;
	}

	/* no active multires found; optionally fall back to the first one */
	return use_first ? firstmmd : NULL;
}
/* Pick the subdivision level appropriate for the current use:
 * render level, sculpt level, or viewport level. */
static int multires_get_level(Object *ob, MultiresModifierData *mmd, int render)
{
	Scene *scene = mmd->modifier.scene;

	if (render)
		return scene ? get_render_subsurf_level(&scene->r, mmd->renderlvl) : mmd->renderlvl;

	if (ob->mode == OB_MODE_SCULPT)
		return mmd->sculptlvl;

	return scene ? get_render_subsurf_level(&scene->r, mmd->lvl) : mmd->lvl;
}
/* Set totlvl to lvl and raise the per-mode view levels up to it,
 * clamped into [0, totlvl]. */
static void multires_set_tot_level(Object *ob, MultiresModifierData *mmd, int lvl)
{
	mmd->totlvl = lvl;
	/* NOTE(review): the viewport level is deliberately not raised while in
	 * sculpt mode — presumably to avoid changing it under the user; confirm */
	if(ob->mode != OB_MODE_SCULPT)
		mmd->lvl = CLAMPIS(MAX2(mmd->lvl, lvl), 0, mmd->totlvl);
	mmd->sculptlvl = CLAMPIS(MAX2(mmd->sculptlvl, lvl), 0, mmd->totlvl);
	mmd->renderlvl = CLAMPIS(MAX2(mmd->renderlvl, lvl), 0, mmd->totlvl);
}
/* Flag the CCG mesh so its edits get flushed back to mdisps on update. */
static void multires_dm_mark_as_modified(DerivedMesh *dm)
{
	((CCGDerivedMesh *)dm)->multires.modified = 1;
}
void multires_mark_as_modified(Object *ob)
{
	/* mark the object's final derived mesh, if it exists */
	if (!ob || !ob->derivedFinal)
		return;

	multires_dm_mark_as_modified(ob->derivedFinal);
}
void multires_force_update(Object *ob)
{
	/* Throw away cached derived and sculpt data so multires is rebuilt. */
	if (!ob)
		return;

	if (ob->derivedFinal) {
		ob->derivedFinal->needsFree = 1;
		ob->derivedFinal->release(ob->derivedFinal);
		ob->derivedFinal = NULL;
	}

	if (ob->sculpt && ob->sculpt->pbvh) {
		BLI_pbvh_free(ob->sculpt->pbvh);
		ob->sculpt->pbvh = NULL;
	}
}
void multires_force_external_reload(Object *ob)
{
	/* Re-read externally stored displacements, then drop cached derived data. */
	Mesh *me = get_mesh(ob);

	CustomData_external_reload(&me->fdata, &me->id, CD_MASK_MDISPS, me->totface);
	multires_force_update(ob);
}
void multires_force_render_update(Object *ob)
{
	/* flush sculpt-mode changes before render, but only when the object
	 * is actually being sculpted and has a multires modifier */
	if (!ob)
		return;
	if (!(ob->mode & OB_MODE_SCULPT))
		return;
	if (!modifiers_findByType(ob, eModifierType_Multires))
		return;

	multires_force_update(ob);
}
int multiresModifier_reshapeFromDM(Scene *scene, MultiresModifierData *mmd,
				Object *ob, DerivedMesh *srcdm)
{
	/* Push srcdm's vertex positions into ob's multires data.
	 * Returns 1 on success, 0 when the vertex counts do not match. */
	DerivedMesh *mrdm = get_multires_dm(scene, mmd, ob);
	int success = 0;

	if (mrdm && srcdm && mrdm->getNumVerts(mrdm) == srcdm->getNumVerts(srcdm)) {
		multires_mvert_to_ss(mrdm, srcdm->getVertArray(srcdm));

		multires_dm_mark_as_modified(mrdm);
		multires_force_update(ob);

		success = 1;
	}

	if (mrdm)
		mrdm->release(mrdm);

	return success;
}
/* Returns 1 on success, 0 if the src's totvert doesn't match */
int multiresModifier_reshape(Scene *scene, MultiresModifierData *mmd, Object *dst, Object *src)
{
	/* reshape dst's multires data from src's final derived vertex positions */
	DerivedMesh *srcdm = mesh_get_derived_final(scene, src, CD_MASK_BAREMESH);

	return multiresModifier_reshapeFromDM(scene, mmd, dst, srcdm);
}
/* Reshape the multires data from the result of running a deform-only
 * modifier on top of the multires mesh. Returns 1 on success, 0 when no
 * level is active or the vertex counts mismatch. */
int multiresModifier_reshapeFromDeformMod(Scene *scene, MultiresModifierData *mmd,
				Object *ob, ModifierData *md)
{
	ModifierTypeInfo *mti = modifierType_getInfo(md->type);
	DerivedMesh *dm, *ndm;
	int numVerts, result;
	float (*deformedVerts)[3];

	/* nothing to reshape when no subdivision level is active */
	if(multires_get_level(ob, mmd, 0) == 0)
		return 0;

	/* Create DerivedMesh for deformation modifier */
	dm = get_multires_dm(scene, mmd, ob);
	numVerts= dm->getNumVerts(dm);
	deformedVerts= MEM_callocN(sizeof(float)*numVerts*3, "multiresReshape_deformVerts");

	dm->getVertCos(dm, deformedVerts);
	mti->deformVerts(md, ob, dm, deformedVerts, numVerts, 0, 0);

	/* apply the deformed coordinates onto a copy; the source dm is released */
	ndm= CDDM_copy(dm);
	CDDM_apply_vert_coords(ndm, deformedVerts);

	MEM_freeN(deformedVerts);
	dm->release(dm);

	/* Reshaping */
	result= multiresModifier_reshapeFromDM(scene, mmd, ob, ndm);

	/* Cleanup */
	ndm->release(ndm);

	return result;
}
/* Derive the subdivision level from the number of stored displacements
 * (scans all faces; assumes consistent mdisps data). */
static int get_levels_from_disps(Object *ob)
{
	Mesh *me = ob->data;
	MDisps *mdisp;
	int i, totlvl= 0;

	mdisp = CustomData_get_layer(&me->fdata, CD_MDISPS);

	for(i = 0; i < me->totface; ++i, ++mdisp) {
		/* quads store 4 corner grids, triangles 3 */
		int S = me->mface[i].v4 ? 4 : 3;

		if(mdisp->totdisp == 0) continue;

		/* walk totlvl up/down until side*side*S matches totdisp,
		 * where side == 2^(totlvl-1)+1 */
		while(1) {
			int side = (1 << (totlvl-1)) + 1;
			int lvl_totdisp = side*side*S;
			if(mdisp->totdisp == lvl_totdisp)
				break;
			else if(mdisp->totdisp < lvl_totdisp)
				--totlvl;
			else
				++totlvl;
		}
	}

	return totlvl;
}
/* reset the multires levels to match the number of mdisps */
void multiresModifier_set_levels_from_disps(MultiresModifierData *mmd, Object *ob)
{
	Mesh *me = ob->data;
	MDisps *mdisp;

	/* in edit mode the displacement layer lives on the editmesh */
	if(me->edit_mesh)
		mdisp = CustomData_get_layer(&me->edit_mesh->fdata, CD_MDISPS);
	else
		mdisp = CustomData_get_layer(&me->fdata, CD_MDISPS);

	if(mdisp) {
		mmd->totlvl = get_levels_from_disps(ob);
		/* fix: clamp the viewport level against its own previous value; it
		 * was mistakenly clamped from sculptlvl (copy-paste slip — the two
		 * lines below clamp each level from itself) */
		mmd->lvl = MIN2(mmd->lvl, mmd->totlvl);
		mmd->sculptlvl = MIN2(mmd->sculptlvl, mmd->totlvl);
		mmd->renderlvl = MIN2(mmd->renderlvl, mmd->totlvl);
	}
}
/* Fill in totdisp (sized for level lvl) on faces that have none yet. */
static void multires_set_tot_mdisps(Mesh *me, int lvl)
{
	MDisps *mdisps = CustomData_get_layer(&me->fdata, CD_MDISPS);
	int i;

	if (!mdisps)
		return;

	for (i = 0; i < me->totface; i++) {
		int nvert;

		if (mdisps[i].totdisp != 0)
			continue;

		nvert = (me->mface[i].v4) ? 4 : 3;
		mdisps[i].totdisp = multires_grid_tot[lvl] * nvert;
	}
}
/* Replace every face's displacement grid with a zeroed one sized for lvl. */
static void multires_reallocate_mdisps(Mesh *me, MDisps *mdisps, int lvl)
{
	int i;

	for (i = 0; i < me->totface; ++i) {
		const int nvert = (me->mface[i].v4) ? 4 : 3;
		const int totdisp = multires_grid_tot[lvl] * nvert;
		float (*disps)[3] = MEM_callocN(sizeof(float) * 3 * totdisp, "multires disps");

		if (mdisps[i].disps)
			MEM_freeN(mdisps[i].disps);

		mdisps[i].disps = disps;
		mdisps[i].totdisp = totdisp;
	}
}
/* Store the three vectors as the rows of mat (a tangent-space basis). */
static void column_vectors_to_mat3(float mat[][3], float v1[3], float v2[3], float v3[3])
{
	float *rows[3];
	int i;

	rows[0] = v1;
	rows[1] = v2;
	rows[2] = v3;

	for (i = 0; i < 3; i++)
		copy_v3_v3(mat[i], rows[i]);
}
/* Copy between displacement grids of different resolutions. The coarser
 * grid's points line up with every skip-th point of the finer grid, so
 * corners stay aligned; no interpolation is done. */
static void multires_copy_grid(float (*gridA)[3], float (*gridB)[3], int sizeA, int sizeB)
{
	int x, y, j, skip;

	if(sizeA > sizeB) {
		/* B is coarser: scatter B's points over A at stride 'skip' */
		skip = (sizeA-1)/(sizeB-1);

		for(j = 0, y = 0; y < sizeB; y++)
			for(x = 0; x < sizeB; x++, j++)
				copy_v3_v3(gridA[y*skip*sizeA + x*skip], gridB[j]);
	}
	else {
		/* B is finer (or equal): gather every skip-th point of B into A */
		skip = (sizeB-1)/(sizeA-1);

		for(j = 0, y = 0; y < sizeA; y++)
			for(x = 0; x < sizeA; x++, j++)
				copy_v3_v3(gridA[j], gridB[y*skip*sizeB + x*skip]);
	}
}
/* Same strided copy as multires_copy_grid, but operating on the .co
 * members of DMGridData grids. */
static void multires_copy_dm_grid(DMGridData *gridA, DMGridData *gridB, int sizeA, int sizeB)
{
	int x, y, j, skip;

	if(sizeA > sizeB) {
		/* B is coarser: scatter B's points over A at stride 'skip' */
		skip = (sizeA-1)/(sizeB-1);

		for(j = 0, y = 0; y < sizeB; y++)
			for(x = 0; x < sizeB; x++, j++)
				copy_v3_v3(gridA[y*skip*sizeA + x*skip].co, gridB[j].co);
	}
	else {
		/* B is finer (or equal): gather every skip-th point of B into A */
		skip = (sizeB-1)/(sizeA-1);

		for(j = 0, y = 0; y < sizeA; y++)
			for(x = 0; x < sizeA; x++, j++)
				copy_v3_v3(gridA[j].co, gridB[y*skip*sizeB + x*skip].co);
	}
}
/* Delete displacement levels above lvl by downsampling the stored grids;
 * deleting down to level 0 removes the layer entirely. */
static void multires_del_higher(MultiresModifierData *mmd, Object *ob, int lvl)
{
	Mesh *me = (Mesh*)ob->data;
	int levels = mmd->totlvl - lvl;
	MDisps *mdisps;

	multires_set_tot_mdisps(me, mmd->totlvl);
	CustomData_external_read(&me->fdata, &me->id, CD_MASK_MDISPS, me->totface);
	mdisps= CustomData_get_layer(&me->fdata, CD_MDISPS);

	multires_force_update(ob);

	if(mdisps && levels > 0) {
		if(lvl > 0) {
			/* grid sides at the kept level (nsize) and current top level (hsize) */
			int nsize = multires_side_tot[lvl];
			int hsize = multires_side_tot[mmd->totlvl];
			int i;

			for(i = 0; i < me->totface; ++i) {
				MDisps *mdisp= &mdisps[i];
				float (*disps)[3], (*ndisps)[3], (*hdisps)[3];
				int nvert = (me->mface[i].v4)? 4: 3;
				int totdisp = multires_grid_tot[lvl]*nvert;
				int S;

				disps = MEM_callocN(sizeof(float) * 3 * totdisp, "multires disps");

				/* downsample each corner grid into the new allocation */
				ndisps = disps;
				hdisps = mdisp->disps;

				for(S = 0; S < nvert; S++) {
					multires_copy_grid(ndisps, hdisps, nsize, hsize);

					ndisps += nsize*nsize;
					hdisps += hsize*hsize;
				}

				MEM_freeN(mdisp->disps);
				mdisp->disps = disps;
				mdisp->totdisp = totdisp;
			}
		}
		else {
			/* level 0: drop both the layer and any external storage */
			CustomData_external_remove(&me->fdata, &me->id, CD_MDISPS, me->totface);
			CustomData_free_layer_active(&me->fdata, CD_MDISPS, me->totface);
		}
	}

	multires_set_tot_level(ob, mmd, lvl);
}
/* direction=1 for delete higher, direction=0 for lower (not implemented yet) */
void multiresModifier_del_levels(MultiresModifierData *mmd, Object *ob, int direction)
{
	Mesh *me = get_mesh(ob);
	int lvl = multires_get_level(ob, mmd, 0);   /* level to keep */
	int levels = mmd->totlvl - lvl;
	MDisps *mdisps;

	multires_set_tot_mdisps(me, mmd->totlvl);
	CustomData_external_read(&me->fdata, &me->id, CD_MASK_MDISPS, me->totface);
	mdisps= CustomData_get_layer(&me->fdata, CD_MDISPS);

	multires_force_update(ob);

	if(mdisps && levels > 0 && direction == 1) {
		multires_del_higher(mmd, ob, lvl);
	}

	/* clamp stored levels even when nothing was deleted */
	multires_set_tot_level(ob, mmd, lvl);
}
/* Build a multires DM from dm via a temporary, stack-local modifier whose
 * viewport/sculpt/render levels are all set to lvl. */
static DerivedMesh *multires_dm_create_local(Object *ob, DerivedMesh *dm, int lvl, int totlvl, int simple)
{
	MultiresModifierData mmd = {{NULL}};

	mmd.lvl = mmd.sculptlvl = mmd.renderlvl = lvl;
	mmd.totlvl = totlvl;
	mmd.simple = simple;

	return multires_dm_create_from_derived(&mmd, 1, dm, ob, 0, 0);
}
/* Subdivide dm to level lvl via a temporary, stack-local subsurf modifier. */
static DerivedMesh *subsurf_dm_create_local(Object *ob, DerivedMesh *dm, int lvl, int simple, int optimal, int plain_uv)
{
	SubsurfModifierData smd = {{NULL}};

	smd.levels = smd.renderLevels = lvl;

	if (!plain_uv)
		smd.flags |= eSubsurfModifierFlag_SubsurfUv;
	if (simple)
		smd.subdivType = ME_SIMPLE_SUBSURF;
	if (optimal)
		smd.flags |= eSubsurfModifierFlag_ControlEdges;

	return subsurf_make_derived_from_derived(dm, &smd, 0, NULL, 0, 0, (ob->mode & OB_MODE_EDIT));
}
/* assumes no is normalized; return value's sign is negative if v is on
   the other side of the plane */
static float v3_dist_from_plane(float v[3], float center[3], float no[3])
{
	float offset[3];

	/* signed distance = (v - center) . no */
	sub_v3_v3v3(offset, v, center);

	return dot_v3v3(offset, no);
}
/* Apply the base: bake the multires shape into new base-mesh coordinates,
 * heuristically adjust the base so it fits the displaced surface better,
 * then recompute the displacements against the new base. */
void multiresModifier_base_apply(MultiresModifierData *mmd, Object *ob)
{
	DerivedMesh *cddm, *dispdm, *origdm;
	Mesh *me;
	ListBase *fmap;
	float (*origco)[3];
	int i, j, offset, totlvl;

	multires_force_update(ob);

	me = get_mesh(ob);
	totlvl = mmd->totlvl;

	/* nothing to do */
	if(!totlvl)
		return;

	/* XXX - probably not necessary to regenerate the cddm so much? */

	/* generate highest level with displacements */
	cddm = CDDM_from_mesh(me, NULL);
	DM_set_only_copy(cddm, CD_MASK_BAREMESH);
	dispdm = multires_dm_create_local(ob, cddm, totlvl, totlvl, 0);
	cddm->release(cddm);

	/* copy the new locations of the base verts into the mesh */
	offset = dispdm->getNumVerts(dispdm) - me->totvert;
	for(i = 0; i < me->totvert; ++i) {
		dispdm->getVertCo(dispdm, offset + i, me->mvert[i].co);
	}

	/* heuristic to produce a better-fitting base mesh */
	cddm = CDDM_from_mesh(me, NULL);
	fmap = cddm->getFaceMap(ob, cddm);
	origco = MEM_callocN(sizeof(float)*3*me->totvert, "multires apply base origco");
	for(i = 0; i < me->totvert ;++i)
		copy_v3_v3(origco[i], me->mvert[i].co);

	for(i = 0; i < me->totvert; ++i) {
		IndexNode *n;
		float avg_no[3] = {0,0,0}, center[3] = {0,0,0}, push[3];
		float dist;
		int tot;

		/* don't adjust verts not used by at least one face */
		if(!fmap[i].first)
			continue;

		/* find center: average of the other corners of all faces using vert i */
		for(n = fmap[i].first, tot = 0; n; n = n->next) {
			MFace *f = &me->mface[n->index];
			int S = f->v4 ? 4 : 3;

			/* this double counts, not sure if that's bad or good */
			for(j = 0; j < S; ++j) {
				int vndx = (&f->v1)[j];
				if(vndx != i) {
					add_v3_v3(center, origco[vndx]);
					++tot;
				}
			}
		}
		mul_v3_fl(center, 1.0f / tot);

		/* find normal: average of the face normals with vert i replaced by center */
		for(n = fmap[i].first; n; n = n->next) {
			MFace *f = &me->mface[n->index];
			int S = f->v4 ? 4 : 3;
			float v[4][3], no[3];

			for(j = 0; j < S; ++j) {
				int vndx = (&f->v1)[j];
				if(vndx == i)
					copy_v3_v3(v[j], center);
				else
					copy_v3_v3(v[j], origco[vndx]);
			}

			if(S == 4)
				normal_quad_v3(no, v[0], v[1], v[2], v[3]);
			else
				normal_tri_v3(no, v[0], v[1], v[2]);
			add_v3_v3(avg_no, no);
		}
		normalize_v3(avg_no);

		/* push vertex away from the plane */
		dist = v3_dist_from_plane(me->mvert[i].co, center, avg_no);
		copy_v3_v3(push, avg_no);
		mul_v3_fl(push, dist);
		add_v3_v3(me->mvert[i].co, push);
	}

	MEM_freeN(origco);
	cddm->release(cddm);

	/* subdivide the mesh to highest level without displacements */
	cddm = CDDM_from_mesh(me, NULL);
	DM_set_only_copy(cddm, CD_MASK_BAREMESH);
	origdm = subsurf_dm_create_local(ob, cddm, totlvl, 0, 0, mmd->flags & eMultiresModifierFlag_PlainUv);
	cddm->release(cddm);

	/* calc disps: invert=1 stores (displaced - smooth) back in tangent space */
	multiresModifier_disp_run(dispdm, me, 1, 0, origdm->getGridData(origdm), totlvl);

	origdm->release(origdm);
	dispdm->release(dispdm);
}
/* Subdivide the object's multires data up to totlvl. When displacements
 * already exist (and updateblock is not set) they are upsampled to the
 * new level; otherwise the displacement grids are simply (re)allocated. */
static void multires_subdivide(MultiresModifierData *mmd, Object *ob, int totlvl, int updateblock, int simple)
{
	Mesh *me = ob->data;
	MDisps *mdisps;
	int lvl= mmd->totlvl;

	if(totlvl > multires_max_levels)
		return;

	multires_force_update(ob);

	mdisps = CustomData_get_layer(&me->fdata, CD_MDISPS);
	if(!mdisps)
		mdisps = CustomData_add_layer(&me->fdata, CD_MDISPS, CD_DEFAULT, NULL, me->totface);

	if(mdisps->disps && !updateblock && totlvl > 1) {
		/* upsample */
		DerivedMesh *lowdm, *cddm, *highdm;
		DMGridData **highGridData, **lowGridData, **subGridData;
		CCGSubSurf *ss;
		int i, numGrids, highGridSize, lowGridSize;

		/* create subsurf DM from original mesh at high level */
		cddm = CDDM_from_mesh(me, NULL);
		DM_set_only_copy(cddm, CD_MASK_BAREMESH);
		highdm = subsurf_dm_create_local(ob, cddm, totlvl, simple, 0, mmd->flags & eMultiresModifierFlag_PlainUv);

		/* create multires DM from original mesh at low level */
		lowdm = multires_dm_create_local(ob, cddm, lvl, lvl, simple);
		cddm->release(cddm);

		/* copy subsurf grids and replace them with low displaced grids */
		numGrids = highdm->getNumGrids(highdm);
		highGridSize = highdm->getGridSize(highdm);
		highGridData = highdm->getGridData(highdm);
		lowGridSize = lowdm->getGridSize(lowdm);
		lowGridData = lowdm->getGridData(lowdm);

		/* fix: allocate with the element type actually stored (DMGridData*);
		 * was sizeof(float*) — same size in practice, but type-incorrect and
		 * inconsistent with the matching allocation in multiresModifier_update */
		subGridData = MEM_callocN(sizeof(DMGridData*)*numGrids, "subGridData*");

		for(i = 0; i < numGrids; ++i) {
			/* backup subsurf grids */
			subGridData[i] = MEM_callocN(sizeof(DMGridData)*highGridSize*highGridSize, "subGridData");
			memcpy(subGridData[i], highGridData[i], sizeof(DMGridData)*highGridSize*highGridSize);

			/* overwrite with current displaced grids */
			multires_copy_dm_grid(highGridData[i], lowGridData[i], highGridSize, lowGridSize);
		}

		/* the lower level dm is no longer needed at this point */
		lowdm->release(lowdm);

		/* subsurf higher levels again with displaced data */
		ss= ((CCGDerivedMesh*)highdm)->ss;
		ccgSubSurf_updateFromFaces(ss, lvl, NULL, 0);
		ccgSubSurf_updateLevels(ss, lvl, NULL, 0);

		/* reallocate displacements */
		multires_reallocate_mdisps(me, mdisps, totlvl);

		/* compute displacements */
		multiresModifier_disp_run(highdm, me, 1, 0, subGridData, totlvl);

		/* free */
		highdm->release(highdm);
		for(i = 0; i < numGrids; ++i)
			MEM_freeN(subGridData[i]);
		MEM_freeN(subGridData);
	}
	else {
		/* only reallocate, nothing to upsample */
		multires_reallocate_mdisps(me, mdisps, totlvl);
	}

	multires_set_tot_level(ob, mmd, totlvl);
}
void multiresModifier_subdivide(MultiresModifierData *mmd, Object *ob, int updateblock, int simple)
{
	/* add exactly one subdivision level on top of the current total */
	const int newlvl = mmd->totlvl + 1;

	multires_subdivide(mmd, ob, newlvl, updateblock, simple);
}
/* Compute the (unnormalized) tangent of grid 'index' at point (x, y)
 * along the given axis (0 = x direction, 1 = y direction) using a
 * forward difference; at the far edge of the grid a backward difference
 * is used, and at the far corner the neighboring row/column is used. */
static void grid_tangent(int gridSize, int index, int x, int y, int axis, DMGridData **gridData, float t[3])
{
	if(axis == 0) {
		if(x == gridSize - 1) {
			if(y == gridSize - 1)
				/* corner: backward difference taken on the previous row */
				sub_v3_v3v3(t, gridData[index][x + gridSize*(y - 1)].co, gridData[index][x - 1 + gridSize*(y - 1)].co);
			else
				sub_v3_v3v3(t, gridData[index][x + gridSize*y].co, gridData[index][x - 1 + gridSize*y].co);
		}
		else
			sub_v3_v3v3(t, gridData[index][x + 1 + gridSize*y].co, gridData[index][x + gridSize*y].co);
	}
	else if(axis == 1) {
		if(y == gridSize - 1) {
			if(x == gridSize - 1)
				/* corner: backward difference taken on the previous column */
				sub_v3_v3v3(t, gridData[index][x - 1 + gridSize*y].co, gridData[index][x - 1 + gridSize*(y - 1)].co);
			else
				sub_v3_v3v3(t, gridData[index][x + gridSize*y].co, gridData[index][x + gridSize*(y - 1)].co);
		}
		else
			sub_v3_v3v3(t, gridData[index][x + gridSize*(y + 1)].co, gridData[index][x + gridSize*y].co);
	}
}
/* Core displacement kernel. Behavior depends on the flags:
 *  invert == 0:          apply the stored tangent-space displacements onto
 *                        the subsurf grids (dm becomes the displaced surface);
 *  invert == 1, add == 0: store (grid - subsurf) back into mdisps in
 *                        tangent space, overwriting the previous values;
 *  invert == 1, add == 1: convert the grid coordinates to tangent space and
 *                        accumulate onto mdisps (callers pass difference
 *                        data in the grids, see multiresModifier_update).
 * oldGridData, when given, supplies the undisplaced subsurf grids used to
 * build the tangent space; otherwise dm's own grids are used. */
static void multiresModifier_disp_run(DerivedMesh *dm, Mesh *me, int invert, int add, DMGridData **oldGridData, int totlvl)
{
	CCGDerivedMesh *ccgdm = (CCGDerivedMesh*)dm;
	DMGridData **gridData, **subGridData;
	MFace *mface = me->mface;
	MDisps *mdisps = CustomData_get_layer(&me->fdata, CD_MDISPS);
	int *gridOffset;
	int i, /*numGrids,*/ gridSize, dGridSize, dSkip;

	if(!mdisps) {
		/* writing back displacements may create the layer on demand */
		if(invert)
			mdisps = CustomData_add_layer(&me->fdata, CD_MDISPS, CD_DEFAULT, NULL, me->totface);
		else
			return;
	}

	/*numGrids = dm->getNumGrids(dm);*/ /*UNUSED*/
	gridSize = dm->getGridSize(dm);
	gridData = dm->getGridData(dm);
	gridOffset = dm->getGridOffset(dm);
	subGridData = (oldGridData)? oldGridData: gridData;

	/* displacement grids are stored at totlvl resolution; dSkip maps a
	 * dm-grid coordinate onto the (finer or equal) displacement grid */
	dGridSize = multires_side_tot[totlvl];
	dSkip = (dGridSize-1)/(gridSize-1);

	#pragma omp parallel for private(i) if(me->totface*gridSize*gridSize*4 >= CCG_OMP_LIMIT)
	for(i = 0; i < me->totface; ++i) {
		const int numVerts = mface[i].v4 ? 4 : 3;
		MDisps *mdisp = &mdisps[i];
		int S, x, y, gIndex = gridOffset[i];

		/* when adding new faces in edit mode, need to allocate disps */
		if(!mdisp->disps)
		#pragma omp critical
		{
			multires_reallocate_mdisps(me, mdisps, totlvl);
		}

		for(S = 0; S < numVerts; ++S, ++gIndex) {
			DMGridData *grid = gridData[gIndex];
			DMGridData *subgrid = subGridData[gIndex];
			float (*dispgrid)[3] = &mdisp->disps[S*dGridSize*dGridSize];

			for(y = 0; y < gridSize; y++) {
				for(x = 0; x < gridSize; x++) {
					float *co = grid[x + y*gridSize].co;
					float *sco = subgrid[x + y*gridSize].co;
					float *no = subgrid[x + y*gridSize].no;
					float *data = dispgrid[dGridSize*y*dSkip + x*dSkip];
					float mat[3][3], tx[3], ty[3], disp[3], d[3];

					/* construct tangent space matrix */
					grid_tangent(gridSize, gIndex, x, y, 0, subGridData, tx);
					normalize_v3(tx);

					grid_tangent(gridSize, gIndex, x, y, 1, subGridData, ty);
					normalize_v3(ty);

					//mul_v3_fl(tx, 1.0f/(gridSize-1));
					//mul_v3_fl(ty, 1.0f/(gridSize-1));
					//cross_v3_v3v3(no, tx, ty);

					column_vectors_to_mat3(mat, tx, ty, no);

					if(!invert) {
						/* convert to object space and add */
						mul_v3_m3v3(disp, mat, data);
						add_v3_v3v3(co, sco, disp);
					}
					else if(!add) {
						/* convert difference to tangent space */
						sub_v3_v3v3(disp, co, sco);
						invert_m3(mat);
						mul_v3_m3v3(data, mat, disp);
					}
					else {
						/* convert difference to tangent space */
						invert_m3(mat);
						mul_v3_m3v3(d, mat, co);
						add_v3_v3(data, d);
					}
				}
			}
		}
	}

	if(!invert) {
		/* smooth shared grid boundaries and refresh normals on the result */
		ccgSubSurf_stitchFaces(ccgdm->ss, 0, NULL, 0);
		ccgSubSurf_updateNormals(ccgdm->ss, NULL, 0);
	}
}
/* Flush edits stored in the CCG grids back into the mdisps customdata
 * (installed as ccgdm->multires.update by multires_dm_create_from_derived). */
static void multiresModifier_update(DerivedMesh *dm)
{
	CCGDerivedMesh *ccgdm= (CCGDerivedMesh*)dm;
	Object *ob;
	Mesh *me;
	MDisps *mdisps;
	MultiresModifierData *mmd;

	ob = ccgdm->multires.ob;
	me = ccgdm->multires.ob->data;
	mmd = ccgdm->multires.mmd;
	multires_set_tot_mdisps(me, mmd->totlvl);
	CustomData_external_read(&me->fdata, &me->id, CD_MASK_MDISPS, me->totface);
	mdisps = CustomData_get_layer(&me->fdata, CD_MDISPS);

	if(mdisps) {
		int lvl = ccgdm->multires.lvl;
		int totlvl = ccgdm->multires.totlvl;

		if(lvl < totlvl) {
			/* edited at a lower level: propagate the difference between the
			 * edited grids and the undisplaced low-level grids up to totlvl */
			Mesh *me = ob->data;
			DerivedMesh *lowdm, *cddm, *highdm;
			DMGridData **highGridData, **lowGridData, **subGridData, **gridData, *diffGrid;
			CCGSubSurf *ss;
			int i, j, numGrids, highGridSize, lowGridSize;

			/* create subsurf DM from original mesh at high level */
			if (ob->derivedDeform) cddm = CDDM_copy(ob->derivedDeform);
			else cddm = CDDM_from_mesh(me, NULL);
			DM_set_only_copy(cddm, CD_MASK_BAREMESH);
			highdm = subsurf_dm_create_local(ob, cddm, totlvl, mmd->simple, 0, mmd->flags & eMultiresModifierFlag_PlainUv);

			/* create multires DM from original mesh and displacements */
			lowdm = multires_dm_create_local(ob, cddm, lvl, totlvl, mmd->simple);
			cddm->release(cddm);

			/* gather grid data */
			numGrids = highdm->getNumGrids(highdm);
			highGridSize = highdm->getGridSize(highdm);
			highGridData = highdm->getGridData(highdm);
			lowGridSize = lowdm->getGridSize(lowdm);
			lowGridData = lowdm->getGridData(lowdm);
			gridData = dm->getGridData(dm);

			subGridData = MEM_callocN(sizeof(DMGridData*)*numGrids, "subGridData*");
			diffGrid = MEM_callocN(sizeof(DMGridData)*lowGridSize*lowGridSize, "diff");

			for(i = 0; i < numGrids; ++i) {
				/* backup subsurf grids */
				subGridData[i] = MEM_callocN(sizeof(DMGridData)*highGridSize*highGridSize, "subGridData");
				memcpy(subGridData[i], highGridData[i], sizeof(DMGridData)*highGridSize*highGridSize);

				/* write difference of subsurf and displaced low level into high subsurf */
				for(j = 0; j < lowGridSize*lowGridSize; ++j)
					sub_v3_v3v3(diffGrid[j].co, gridData[i][j].co, lowGridData[i][j].co);

				multires_copy_dm_grid(highGridData[i], diffGrid, highGridSize, lowGridSize);
			}

			/* lower level dm no longer needed at this point */
			MEM_freeN(diffGrid);
			lowdm->release(lowdm);

			/* subsurf higher levels again with difference of coordinates */
			ss= ((CCGDerivedMesh*)highdm)->ss;
			ccgSubSurf_updateFromFaces(ss, lvl, NULL, 0);
			ccgSubSurf_updateLevels(ss, lvl, NULL, 0);

			/* add to displacements (invert=1, add=1) */
			multiresModifier_disp_run(highdm, me, 1, 1, subGridData, mmd->totlvl);

			/* free */
			highdm->release(highdm);
			for(i = 0; i < numGrids; ++i)
				MEM_freeN(subGridData[i]);
			MEM_freeN(subGridData);
		}
		else {
			/* edited at the top level: recompute displacements directly
			 * against a fresh undisplaced subsurf */
			DerivedMesh *cddm, *subdm;

			if (ob->derivedDeform) cddm = CDDM_copy(ob->derivedDeform);
			else cddm = CDDM_from_mesh(me, NULL);
			DM_set_only_copy(cddm, CD_MASK_BAREMESH);

			subdm = subsurf_dm_create_local(ob, cddm, mmd->totlvl, mmd->simple, 0, mmd->flags & eMultiresModifierFlag_PlainUv);
			cddm->release(cddm);

			multiresModifier_disp_run(dm, me, 1, 0, subdm->getGridData(subdm), mmd->totlvl);

			subdm->release(subdm);
		}
	}
}
void multires_stitch_grids(Object *ob)
{
	/* utility for smooth brush: stitch only the faces flagged as updated
	 * in the PBVH, so shared grid boundaries stay in sync */
	CCGDerivedMesh *ccgdm;
	CCGFace **faces;
	int totface;

	if (!ob || !ob->derivedFinal)
		return;

	ccgdm = (CCGDerivedMesh *)ob->derivedFinal;
	if (!ccgdm->pbvh)
		return;

	BLI_pbvh_get_grid_updates(ccgdm->pbvh, 0, (void ***)&faces, &totface);
	if (totface) {
		ccgSubSurf_stitchFaces(ccgdm->ss, 0, faces, totface);
		MEM_freeN(faces);
	}
}
/* Modifier entry point: subdivide dm to the level chosen by the modifier
 * settings and apply the stored displacements on top. Returns dm itself
 * unchanged when the chosen level is 0. */
DerivedMesh *multires_dm_create_from_derived(MultiresModifierData *mmd, int local_mmd, DerivedMesh *dm, Object *ob,
				int useRenderParams, int UNUSED(isFinalCalc))
{
	Mesh *me= ob->data;
	DerivedMesh *result;
	CCGDerivedMesh *ccgdm;
	DMGridData **gridData, **subGridData;
	int lvl= multires_get_level(ob, mmd, useRenderParams);
	int i, gridSize, numGrids;

	if(lvl == 0)
		return dm;

	result = subsurf_dm_create_local(ob, dm, lvl,
		mmd->simple, mmd->flags & eMultiresModifierFlag_ControlEdges,
		mmd->flags & eMultiresModifierFlag_PlainUv);

	if(!local_mmd) {
		/* a real (non-temporary) modifier: hook up the info needed to flush
		 * sculpt edits back via multiresModifier_update */
		ccgdm = (CCGDerivedMesh*)result;

		ccgdm->multires.ob = ob;
		ccgdm->multires.mmd = mmd;
		ccgdm->multires.local_mmd = local_mmd;
		ccgdm->multires.lvl = lvl;
		ccgdm->multires.totlvl = mmd->totlvl;
		ccgdm->multires.modified = 0;
		ccgdm->multires.update = multiresModifier_update;
	}

	numGrids = result->getNumGrids(result);
	gridSize = result->getGridSize(result);
	gridData = result->getGridData(result);

	/* back up the undisplaced subsurf grids; disp_run builds the tangent
	 * space from them while it overwrites gridData */
	subGridData = MEM_callocN(sizeof(DMGridData*)*numGrids, "subGridData*");

	for(i = 0; i < numGrids; i++) {
		subGridData[i] = MEM_callocN(sizeof(DMGridData)*gridSize*gridSize, "subGridData");
		memcpy(subGridData[i], gridData[i], sizeof(DMGridData)*gridSize*gridSize);
	}

	multires_set_tot_mdisps(me, mmd->totlvl);
	CustomData_external_read(&me->fdata, &me->id, CD_MASK_MDISPS, me->totface);

	/* invert=0: apply displacements onto the subsurf result */
	multiresModifier_disp_run(result, ob->data, 0, 0, subGridData, mmd->totlvl);

	for(i = 0; i < numGrids; i++)
		MEM_freeN(subGridData[i]);
	MEM_freeN(subGridData);

	return result;
}
/**** Old Multires code ****
***************************/

/* Adapted from sculptmode.c */
/* Bilinearly sample a st-by-st displacement grid at fractional (u, v);
 * coordinates are clamped into the grid's valid range first. */
void old_mdisps_bilinear(float out[3], float (*disps)[3], const int st, float u, float v)
{
	int x, y, x2, y2;
	const int st_max = st - 1;
	float urat, vrat, uopp;
	float d[4][3], d2[2][3];

	/* clamp the sample point into [0, st_max] */
	if(u < 0)
		u = 0;
	else if(u >= st)
		u = st_max;
	if(v < 0)
		v = 0;
	else if(v >= st)
		v = st_max;

	x = floor(u);
	y = floor(v);
	x2 = x + 1;
	y2 = y + 1;

	if(x2 >= st) x2 = st_max;
	if(y2 >= st) y2 = st_max;

	urat = u - x;
	vrat = v - y;
	uopp = 1 - urat;

	/* weight the four surrounding samples, then lerp the two rows by vrat */
	mul_v3_v3fl(d[0], disps[y * st + x], uopp);
	mul_v3_v3fl(d[1], disps[y * st + x2], urat);
	mul_v3_v3fl(d[2], disps[y2 * st + x], uopp);
	mul_v3_v3fl(d[3], disps[y2 * st + x2], urat);

	add_v3_v3v3(d2[0], d[0], d[1]);
	add_v3_v3v3(d2[1], d[2], d[3]);
	mul_v3_fl(d2[0], 1 - vrat);
	mul_v3_fl(d2[1], vrat);

	add_v3_v3v3(out, d2[0], d2[1]);
}
/* Map new-grid coordinates (x, y) of corner S into the old single-grid
 * (u, v) space, rotating each corner's quadrant about the old grid's
 * center (offset, offset). */
static void old_mdisps_rotate(int S, int UNUSED(newside), int oldside, int x, int y, float *u, float *v)
{
	const float offset = oldside * 0.5f - 0.5f;

	switch (S) {
		case 0: *u = offset - y; *v = offset - x; break;
		case 1: *u = offset + x; *v = offset - y; break;
		case 2: *u = offset + y; *v = offset + x; break;
		case 3: *u = offset - x; *v = offset + y; break;
	}
}
/* Convert one face's displacements from the old single-grid-per-face
 * layout to the current per-corner grid layout one level lower,
 * resampling each corner's quadrant and swizzling the displacement axes
 * to match the corner's orientation. */
static void old_mdisps_convert(MFace *mface, MDisps *mdisp)
{
	/* recover the old level from the stored point count:
	 * side = sqrt(totdisp), level = log2(side - 1) */
	int newlvl = log(sqrt(mdisp->totdisp)-1)/M_LN2;
	int oldlvl = newlvl+1;
	int oldside = multires_side_tot[oldlvl];
	int newside = multires_side_tot[newlvl];
	int nvert = (mface->v4)? 4: 3;
	int newtotdisp = multires_grid_tot[newlvl]*nvert;
	int x, y, S;
	float (*disps)[3], (*out)[3], u = 0.0f, v = 0.0f; /* Quite gcc barking. */

	disps = MEM_callocN(sizeof(float) * 3 * newtotdisp, "multires disps");

	out = disps;
	for(S = 0; S < nvert; S++) {
		for(y = 0; y < newside; ++y) {
			for(x = 0; x < newside; ++x, ++out) {
				/* sample the old grid at the rotated coordinate, then swap/
				 * negate the x/y components per corner orientation */
				old_mdisps_rotate(S, newside, oldside, x, y, &u, &v);
				old_mdisps_bilinear(*out, mdisp->disps, oldside, u, v);

				if(S == 1) { (*out)[1]= -(*out)[1]; }
				else if(S == 2) { SWAP(float, (*out)[0], (*out)[1]); }
				else if(S == 3) { (*out)[0]= -(*out)[0]; }
				else if(S == 0) { SWAP(float, (*out)[0], (*out)[1]); (*out)[0]= -(*out)[0]; (*out)[1]= -(*out)[1]; };
			}
		}
	}

	MEM_freeN(mdisp->disps);
	mdisp->totdisp= newtotdisp;
	mdisp->disps= disps;
}
/* Convert every face's old-format displacements (see old_mdisps_convert)
 * when loading older files. */
void multires_load_old_250(Mesh *me)
{
	MDisps *mdisps = CustomData_get_layer(&me->fdata, CD_MDISPS);
	int a;

	if (!mdisps)
		return;

	for (a = 0; a < me->totface; a++) {
		if (mdisps[a].totdisp)
			old_mdisps_convert(&me->mface[a], &mdisps[a]);
	}
}
/* Does not actually free lvl itself */
static void multires_free_level(MultiresLevel *lvl)
{
	if (!lvl)
		return;

	if (lvl->faces)
		MEM_freeN(lvl->faces);
	if (lvl->edges)
		MEM_freeN(lvl->edges);
	if (lvl->colfaces)
		MEM_freeN(lvl->colfaces);
}
void multires_free(Multires *mr)
{
	MultiresLevel *lvl;

	if (!mr)
		return;

	lvl = mr->levels.first;

	/* Free the first-level data */
	if (lvl) {
		CustomData_free(&mr->vdata, lvl->totvert);
		CustomData_free(&mr->fdata, lvl->totface);
		if (mr->edge_flags)
			MEM_freeN(mr->edge_flags);
		if (mr->edge_creases)
			MEM_freeN(mr->edge_creases);
	}

	/* free each level's arrays, then the level list itself */
	for (; lvl; lvl = lvl->next)
		multires_free_level(lvl);

	MEM_freeN(mr->verts);
	BLI_freelistN(&mr->levels);
	MEM_freeN(mr);
}
/* Build, for every vertex, the list of faces using it. All list nodes come
 * from a single allocation returned through *mem (freed by the caller). */
static void create_old_vert_face_map(ListBase **map, IndexNode **mem, const MultiresFace *mface,
				const int totvert, const int totface)
{
	IndexNode *node;
	int i, j;

	*map = MEM_callocN(sizeof(ListBase) * totvert, "vert face map");
	*mem = MEM_callocN(sizeof(IndexNode) * totface * 4, "vert face map mem");
	node = *mem;

	/* Find the users */
	for (i = 0; i < totface; ++i) {
		const int corners = mface[i].v[3] ? 4 : 3;

		for (j = 0; j < corners; ++j, ++node) {
			node->index = i;
			BLI_addtail(&(*map)[mface[i].v[j]], node);
		}
	}
}
/* Build, for every vertex, the list of edges using it. All list nodes come
 * from a single allocation returned through *mem (freed by the caller). */
static void create_old_vert_edge_map(ListBase **map, IndexNode **mem, const MultiresEdge *medge,
				const int totvert, const int totedge)
{
	IndexNode *node;
	int i, j;

	*map = MEM_callocN(sizeof(ListBase) * totvert, "vert edge map");
	*mem = MEM_callocN(sizeof(IndexNode) * totedge * 2, "vert edge map mem");
	node = *mem;

	/* Find the users */
	for (i = 0; i < totedge; ++i) {
		for (j = 0; j < 2; ++j, ++node) {
			node->index = i;
			BLI_addtail(&(*map)[medge[i].v[j]], node);
		}
	}
}
/* Return the face from v1's face list whose corners include all four of
 * the given vertices, or NULL when there is none. */
static MultiresFace *find_old_face(ListBase *map, MultiresFace *faces, int v1, int v2, int v3, int v4)
{
	const int v[4] = {v1, v2, v3, v4};
	IndexNode *n;

	for (n = map[v1].first; n; n = n->next) {
		MultiresFace *f = &faces[n->index];
		int matched = 0, i, j;

		/* count how many of the requested verts appear in this face */
		for (i = 0; i < 4; ++i) {
			for (j = 0; j < 4; ++j) {
				if (v[i] == f->v[j]) {
					matched++;
					break;
				}
			}
		}

		if (matched == 4)
			return f;
	}

	return NULL;
}
/* An edge between v1 and v2 appears in both verts' edge lists; return the
 * edge at the first common index, or NULL when none exists. */
static MultiresEdge *find_old_edge(ListBase *map, MultiresEdge *edges, int v1, int v2)
{
	IndexNode *a, *b;

	for (a = map[v1].first; a; a = a->next)
		for (b = map[v2].first; b; b = b->next)
			if (a->index == b->index)
				return &edges[a->index];

	return NULL;
}
/* Recursively map old-format edge midpoint verts into vvmap: place the
 * midpoint of edge (v1, v2) at dst + mov, then recurse into both halves
 * on the next level with half the stride. */
static void multires_load_old_edges(ListBase **emap, MultiresLevel *lvl, int *vvmap, int dst, int v1, int v2, int mov)
{
	int emid = find_old_edge(emap[2], lvl->edges, v1, v2)->mid;
	vvmap[dst + mov] = emid;

	if(lvl->next->next) {
		multires_load_old_edges(emap + 1, lvl->next, vvmap, dst + mov, v1, emid, mov / 2);
		multires_load_old_edges(emap + 1, lvl->next, vvmap, dst + mov, v2, emid, -mov / 2);
	}
}
/* Recursively map old-format face-interior verts into vvmap: place the
 * face midpoint at dst, then recurse into the four quadrants (and seed
 * the four midlines via multires_load_old_edges) on the next level with
 * half the stride (st3). */
static void multires_load_old_faces(ListBase **fmap, ListBase **emap, MultiresLevel *lvl, int *vvmap, int dst,
				int v1, int v2, int v3, int v4, int st2, int st3)
{
	int fmid;
	int emid13, emid14, emid23, emid24;

	if(lvl && lvl->next) {
		fmid = find_old_face(fmap[1], lvl->faces, v1, v2, v3, v4)->mid;
		vvmap[dst] = fmid;

		/* midpoints of the four surrounding edges on the next level */
		emid13 = find_old_edge(emap[1], lvl->edges, v1, v3)->mid;
		emid14 = find_old_edge(emap[1], lvl->edges, v1, v4)->mid;
		emid23 = find_old_edge(emap[1], lvl->edges, v2, v3)->mid;
		emid24 = find_old_edge(emap[1], lvl->edges, v2, v4)->mid;

		multires_load_old_faces(fmap + 1, emap + 1, lvl->next, vvmap, dst + st2 * st3 + st3,
					fmid, v2, emid23, emid24, st2, st3 / 2);

		multires_load_old_faces(fmap + 1, emap + 1, lvl->next, vvmap, dst - st2 * st3 + st3,
					emid14, emid24, fmid, v4, st2, st3 / 2);

		multires_load_old_faces(fmap + 1, emap + 1, lvl->next, vvmap, dst + st2 * st3 - st3,
					emid13, emid23, v3, fmid, st2, st3 / 2);

		multires_load_old_faces(fmap + 1, emap + 1, lvl->next, vvmap, dst - st2 * st3 - st3,
					v1, fmid, emid13, emid14, st2, st3 / 2);

		if(lvl->next->next) {
			multires_load_old_edges(emap, lvl->next, vvmap, dst, emid24, fmid, st3);
			multires_load_old_edges(emap, lvl->next, vvmap, dst, emid13, fmid, -st3);
			multires_load_old_edges(emap, lvl->next, vvmap, dst, emid14, fmid, -st2 * st3);
			multires_load_old_edges(emap, lvl->next, vvmap, dst, emid23, fmid, st2 * st3);
		}
	}
}
/* Write an array of vertex coordinates into the subsurf structure, reading
 * mvert in CCG storage order (face centers, face grid edges, face grid
 * interiors, then edge interiors and finally original verts), and sync
 * the data back to the faces. */
static void multires_mvert_to_ss(DerivedMesh *dm, MVert *mvert)
{
	CCGDerivedMesh *ccgdm = (CCGDerivedMesh*) dm;
	CCGSubSurf *ss = ccgdm->ss;
	DMGridData *vd;
	int index;
	int totvert, totedge, totface;
	int gridSize = ccgSubSurf_getGridSize(ss);
	int edgeSize = ccgSubSurf_getEdgeSize(ss);
	int i = 0;

	totface = ccgSubSurf_getNumFaces(ss);
	for(index = 0; index < totface; index++) {
		CCGFace *f = ccgdm->faceMap[index].face;
		int x, y, S, numVerts = ccgSubSurf_getFaceNumVerts(f);

		/* face center point */
		vd= ccgSubSurf_getFaceCenterData(f);
		copy_v3_v3(vd->co, mvert[i].co);
		i++;

		/* interior points of each corner grid's edges */
		for(S = 0; S < numVerts; S++) {
			for(x = 1; x < gridSize - 1; x++, i++) {
				vd= ccgSubSurf_getFaceGridEdgeData(ss, f, S, x);
				copy_v3_v3(vd->co, mvert[i].co);
			}
		}

		/* interior points of each corner grid */
		for(S = 0; S < numVerts; S++) {
			for(y = 1; y < gridSize - 1; y++) {
				for(x = 1; x < gridSize - 1; x++, i++) {
					vd= ccgSubSurf_getFaceGridData(ss, f, S, x, y);
					copy_v3_v3(vd->co, mvert[i].co);
				}
			}
		}
	}

	/* interior points of the original edges */
	totedge = ccgSubSurf_getNumEdges(ss);
	for(index = 0; index < totedge; index++) {
		CCGEdge *e = ccgdm->edgeMap[index].edge;
		int x;

		for(x = 1; x < edgeSize - 1; x++, i++) {
			vd= ccgSubSurf_getEdgeData(ss, e, x);
			copy_v3_v3(vd->co, mvert[i].co);
		}
	}

	/* the original vertices themselves */
	totvert = ccgSubSurf_getNumVerts(ss);
	for(index = 0; index < totvert; index++) {
		CCGVert *v = ccgdm->vertMap[index].vert;

		vd= ccgSubSurf_getVertData(ss, v);
		copy_v3_v3(vd->co, mvert[i].co);
		i++;
	}

	ccgSubSurf_updateToFaces(ss, 0, NULL, 0);
}
/* Loads a multires object stored in the old (pre-2.5) Multires struct into
 * the new modifier-based format: builds a mapping table 'vvmap' from the
 * new subdivision-surface vertex order of 'dm' to the old per-level vertex
 * order stored in me->mr, then copies the vertex coordinates across.
 * totlvl is the total number of subdivision levels of the old data. */
static void multires_load_old_dm(DerivedMesh *dm, Mesh *me, int totlvl)
{
    MultiresLevel *lvl, *lvl1;
    Multires *mr= me->mr;
    MVert *vsrc, *vdst;
    unsigned int src, dst;
    /* verts on one side of a face grid at the highest level, minus one */
    int st = multires_side_tot[totlvl - 1] - 1;
    /* interior verts of an original edge (endpoints excluded) */
    int extedgelen = multires_side_tot[totlvl] - 2;
    int *vvmap; // inorder for dst, map to src
    int crossedgelen;
    int s, x, tottri, totquad;
    unsigned int i, j, totvert;

    src = 0;
    vsrc = mr->verts;
    vdst = dm->getVertArray(dm);
    totvert = (unsigned int)dm->getNumVerts(dm);
    vvmap = MEM_callocN(sizeof(int) * totvert, "multires vvmap");

    lvl1 = mr->levels.first;
    /* Load base verts: level-1 vertices sit at the end of the destination
     * vertex array. */
    for(i = 0; i < lvl1->totvert; ++i) {
        vvmap[totvert - lvl1->totvert + i] = src;
        ++src;
    }

    /* Original edges: interior verts of every level-1 edge, collected
     * level by level. */
    dst = totvert - lvl1->totvert - extedgelen * lvl1->totedge;
    for(i = 0; i < lvl1->totedge; ++i) {
        int ldst = dst + extedgelen * i;
        int lsrc = src;
        lvl = lvl1->next;

        for(j = 2; j <= mr->level_count; ++j) {
            int base = multires_side_tot[totlvl - j + 1] - 2;
            int skip = multires_side_tot[totlvl - j + 2] - 1;
            int st = multires_side_tot[j - 1] - 1;  /* intentionally shadows the outer 'st' */

            for(x = 0; x < st; ++x)
                vvmap[ldst + base + x * skip] = lsrc + st * i + x;

            lsrc += lvl->totvert - lvl->prev->totvert;
            lvl = lvl->next;
        }
    }

    /* Center points: one per level-1 face; dst strides over each face's
     * whole grid allocation. */
    dst = 0;
    for(i = 0; i < lvl1->totface; ++i) {
        int sides = lvl1->faces[i].v[3] ? 4 : 3;

        vvmap[dst] = src + lvl1->totedge + i;
        dst += 1 + sides * (st - 1) * st;
    }

    /* The rest is only for level 3 and up */
    if(lvl1->next && lvl1->next->next) {
        ListBase **fmap, **emap;
        IndexNode **fmem, **emem;

        /* Face edge cross: verts along the lines joining each face center
         * to the original edge midpoints. */
        tottri = totquad = 0;
        crossedgelen = multires_side_tot[totlvl - 1] - 2;
        dst = 0;
        for(i = 0; i < lvl1->totface; ++i) {
            int sides = lvl1->faces[i].v[3] ? 4 : 3;

            lvl = lvl1->next->next;
            ++dst;

            for(j = 3; j <= mr->level_count; ++j) {
                int base = multires_side_tot[totlvl - j + 1] - 2;
                int skip = multires_side_tot[totlvl - j + 2] - 1;
                int st = pow(2, j - 2);
                int st2 = pow(2, j - 3);
                int lsrc = lvl->prev->totvert;

                /* Skip exterior edge verts */
                lsrc += lvl1->totedge * st;

                /* Skip earlier face edge crosses */
                lsrc += st2 * (tottri * 3 + totquad * 4);

                for(s = 0; s < sides; ++s) {
                    for(x = 0; x < st2; ++x) {
                        vvmap[dst + crossedgelen * (s + 1) - base - x * skip - 1] = lsrc;
                        ++lsrc;
                    }
                }

                lvl = lvl->next;
            }

            dst += sides * (st - 1) * st;

            if(sides == 4) ++totquad;
            else ++tottri;
        }

        /* calculate vert to edge/face maps for each level (except the last) */
        fmap = MEM_callocN(sizeof(ListBase*) * (mr->level_count-1), "multires fmap");
        emap = MEM_callocN(sizeof(ListBase*) * (mr->level_count-1), "multires emap");
        fmem = MEM_callocN(sizeof(IndexNode*) * (mr->level_count-1), "multires fmem");
        emem = MEM_callocN(sizeof(IndexNode*) * (mr->level_count-1), "multires emem");
        lvl = lvl1;
        for(i = 0; i < (unsigned int)mr->level_count - 1; ++i) {
            create_old_vert_face_map(fmap + i, fmem + i, lvl->faces, lvl->totvert, lvl->totface);
            create_old_vert_edge_map(emap + i, emem + i, lvl->edges, lvl->totvert, lvl->totedge);
            lvl = lvl->next;
        }

        /* Interior face verts, filled per face corner by the recursive
         * helper. */
        /* lvl = lvl1->next->next; */ /* UNUSED */
        dst = 0;
        for(j = 0; j < lvl1->totface; ++j) {
            int sides = lvl1->faces[j].v[3] ? 4 : 3;
            int ldst = dst + 1 + sides * (st - 1);

            for(s = 0; s < sides; ++s) {
                int st2 = multires_side_tot[totlvl - 1] - 2;
                int st3 = multires_side_tot[totlvl - 2] - 2;
                int st4 = st3 == 0 ? 1 : (st3 + 1) / 2;
                int mid = ldst + st2 * st3 + st3;
                int cv = lvl1->faces[j].v[s];
                int nv = lvl1->faces[j].v[s == sides - 1 ? 0 : s + 1];
                int pv = lvl1->faces[j].v[s == 0 ? sides - 1 : s - 1];

                multires_load_old_faces(fmap, emap, lvl1->next, vvmap, mid,
                                        vvmap[dst], cv,
                                        find_old_edge(emap[0], lvl1->edges, pv, cv)->mid,
                                        find_old_edge(emap[0], lvl1->edges, cv, nv)->mid,
                                        st2, st4);

                ldst += (st - 1) * (st - 1);
            }

            dst = ldst;
        }

        /*lvl = lvl->next;*/ /*UNUSED*/

        for(i = 0; i < (unsigned int)(mr->level_count - 1); ++i) {
            MEM_freeN(fmap[i]);
            MEM_freeN(fmem[i]);
            MEM_freeN(emap[i]);
            MEM_freeN(emem[i]);
        }

        MEM_freeN(fmap);
        MEM_freeN(emap);
        MEM_freeN(fmem);
        MEM_freeN(emem);
    }

    /* Transfer verts */
    for(i = 0; i < totvert; ++i)
        copy_v3_v3(vdst[i].co, vsrc[vvmap[i]].co);

    MEM_freeN(vvmap);

    multires_mvert_to_ss(dm, vdst);
}
/* Copy the first-level vertex-color data from the old multires storage
 * into the mesh's active MCol layer, if both exist.
 * Warning: higher-level vcol data will be lost. */
static void multires_load_old_vcols(Mesh *me)
{
    MultiresLevel *first_lvl;
    MultiresColFace *src_face;
    MCol *dst_col;
    int face;

    first_lvl = me->mr->levels.first;
    if(!first_lvl)
        return;

    src_face = first_lvl->colfaces;
    if(!src_face)
        return;

    /* older multires format never supported multiple vcol layers,
       so we can assume the active vcol layer is the correct one */
    dst_col = CustomData_get_layer(&me->fdata, CD_MCOL);
    if(!dst_col)
        return;

    for(face = 0; face < me->totface; ++face) {
        int corner;

        for(corner = 0; corner < 4; ++corner) {
            MCol *mc = &dst_col[face*4 + corner];

            mc->a = src_face[face].col[corner].a;
            mc->r = src_face[face].col[corner].r;
            mc->g = src_face[face].col[corner].g;
            mc->b = src_face[face].col[corner].b;
        }
    }
}
/* Copy the first-level face-flag data from the old multires storage onto
 * the mesh faces. */
static void multires_load_old_face_flags(Mesh *me)
{
    MultiresLevel *first_lvl = me->mr->levels.first;
    MultiresFace *old_faces;
    int face;

    if(!first_lvl)
        return;

    old_faces = first_lvl->faces;
    if(!old_faces)
        return;

    for(face = 0; face < me->totface; ++face)
        me->mface[face].flag = old_faces[face].flag;
}
/* Convert a mesh with old-style (pre-2.5) Multires data to the new
 * modifier-based representation: load the level-1 mesh, insert and
 * subdivide a Multires modifier, transfer the high-res vertices into the
 * new displacement grids, move the first-level customdata across, then
 * free the old Multires struct. */
void multires_load_old(Object *ob, Mesh *me)
{
    MultiresLevel *lvl;
    ModifierData *md;
    MultiresModifierData *mmd;
    DerivedMesh *dm, *orig;
    CustomDataLayer *l;
    int i;

    /* Load original level into the mesh */
    lvl = me->mr->levels.first;
    CustomData_free_layers(&me->vdata, CD_MVERT, lvl->totvert);
    CustomData_free_layers(&me->edata, CD_MEDGE, lvl->totedge);
    CustomData_free_layers(&me->fdata, CD_MFACE, lvl->totface);
    me->totvert = lvl->totvert;
    me->totedge = lvl->totedge;
    me->totface = lvl->totface;
    me->mvert = CustomData_add_layer(&me->vdata, CD_MVERT, CD_CALLOC, NULL, me->totvert);
    me->medge = CustomData_add_layer(&me->edata, CD_MEDGE, CD_CALLOC, NULL, me->totedge);
    me->mface = CustomData_add_layer(&me->fdata, CD_MFACE, CD_CALLOC, NULL, me->totface);
    memcpy(me->mvert, me->mr->verts, sizeof(MVert) * me->totvert);
    for(i = 0; i < me->totedge; ++i) {
        me->medge[i].v1 = lvl->edges[i].v[0];
        me->medge[i].v2 = lvl->edges[i].v[1];
    }
    for(i = 0; i < me->totface; ++i) {
        me->mface[i].v1 = lvl->faces[i].v[0];
        me->mface[i].v2 = lvl->faces[i].v[1];
        me->mface[i].v3 = lvl->faces[i].v[2];
        me->mface[i].v4 = lvl->faces[i].v[3];
        me->mface[i].mat_nr = lvl->faces[i].mat_nr;
    }

    /* Add a multires modifier to the object, after any leading
     * deform-only modifiers */
    md = ob->modifiers.first;
    while(md && modifierType_getInfo(md->type)->type == eModifierTypeType_OnlyDeform)
        md = md->next;
    mmd = (MultiresModifierData*)modifier_new(eModifierType_Multires);
    BLI_insertlinkbefore(&ob->modifiers, md, mmd);

    /* subdivide once per extra level stored in the old data */
    for(i = 0; i < me->mr->level_count - 1; ++i)
        multiresModifier_subdivide(mmd, ob, 1, 0);

    mmd->lvl = mmd->totlvl;
    orig = CDDM_from_mesh(me, NULL);
    /* push the old high-res vertex positions into the new grids */
    dm = multires_dm_create_from_derived(mmd, 0, orig, ob, 0, 0);

    multires_load_old_dm(dm, me, mmd->totlvl+1);

    multires_dm_mark_as_modified(dm);
    dm->release(dm);
    orig->release(orig);

    /* Copy the first-level data to the mesh; CD_REFERENCE plus the memset
     * below presumably transfers ownership of the layer data to the mesh
     * without a double free — TODO confirm against CustomData docs */
    for(i = 0, l = me->mr->vdata.layers; i < me->mr->vdata.totlayer; ++i, ++l)
        CustomData_add_layer(&me->vdata, l->type, CD_REFERENCE, l->data, me->totvert);
    for(i = 0, l = me->mr->fdata.layers; i < me->mr->fdata.totlayer; ++i, ++l)
        CustomData_add_layer(&me->fdata, l->type, CD_REFERENCE, l->data, me->totface);
    memset(&me->mr->vdata, 0, sizeof(CustomData));
    memset(&me->mr->fdata, 0, sizeof(CustomData));

    multires_load_old_vcols(me);
    multires_load_old_face_flags(me);

    /* Remove the old multires */
    multires_free(me->mr);
    me->mr= NULL;
}
/* Bring the subdivision level of 'ob' in line with that of 'to_ob'
 * (used before joining objects): delete higher levels or subdivide as
 * required. If 'ob' has no multires modifier, stray MDISPS data is
 * removed instead. */
static void multires_sync_levels(Scene *scene, Object *ob, Object *to_ob)
{
    MultiresModifierData *src_mmd = get_multires_modifier(scene, ob, 1);
    MultiresModifierData *dst_mmd = get_multires_modifier(scene, to_ob, 1);

    if(!src_mmd) {
        /* object could have MDISP even when there is no multires modifier
           this could lead to troubles due to i've got no idea how mdisp could be
           upsampled correct without modifier data.
           just remove mdisps if no multires present (nazgul) */
        Mesh *me = (Mesh*)ob->data;

        CustomData_external_remove(&me->fdata, &me->id, CD_MDISPS, me->totface);
        CustomData_free_layer_active(&me->fdata, CD_MDISPS, me->totface);
    }

    if(!src_mmd || !dst_mmd)
        return;

    if(src_mmd->totlvl > dst_mmd->totlvl)
        multires_del_higher(src_mmd, ob, dst_mmd->totlvl);
    else
        multires_subdivide(src_mmd, ob, dst_mmd->totlvl, 0, src_mmd->simple);
}
/* Apply the 3x3 matrix 'smat' (an object scale) to the multires
 * displacements of 'ob': recomputes every tangent-space displacement so
 * the sculpted detail keeps its shape after the object-space transform. */
static void multires_apply_smat(Scene *scene, Object *ob, float smat[3][3])
{
    DerivedMesh *dm= NULL, *cddm= NULL, *subdm= NULL;
    DMGridData **gridData, **subGridData;
    Mesh *me= (Mesh*)ob->data;
    MFace *mface= me->mface;
    MDisps *mdisps;
    int *gridOffset;
    int i, /*numGrids,*/ gridSize, dGridSize, dSkip, totvert;
    float (*vertCos)[3] = NULL;
    MultiresModifierData *mmd= get_multires_modifier(scene, ob, 1);
    MultiresModifierData high_mmd;

    CustomData_external_read(&me->fdata, &me->id, CD_MASK_MDISPS, me->totface);
    mdisps= CustomData_get_layer(&me->fdata, CD_MDISPS);

    /* nothing to rescale without displacement data and a modifier */
    if(!mdisps || !mmd) return;

    /* we need derived mesh created from highest resolution */
    high_mmd= *mmd;
    high_mmd.lvl= high_mmd.totlvl;

    /* unscaled multires with applied displacement */
    subdm= get_multires_dm(scene, &high_mmd, ob);

    /* prepare scaled CDDM to create ccgDN */
    cddm= mesh_get_derived_deform(scene, ob, CD_MASK_BAREMESH);

    totvert= cddm->getNumVerts(cddm);
    vertCos= MEM_mallocN(sizeof(*vertCos) * totvert, "multiresScale vertCos");
    cddm->getVertCos(cddm, vertCos);
    /* transform the base-mesh coordinates by smat */
    for(i=0; i<totvert; i++)
        mul_m3_v3(smat, vertCos[i]);
    CDDM_apply_vert_coords(cddm, vertCos);
    MEM_freeN(vertCos);

    /* scaled ccgDM for tangent space of object with applied scale */
    dm= subsurf_dm_create_local(ob, cddm, high_mmd.totlvl, high_mmd.simple, 0, mmd->flags & eMultiresModifierFlag_PlainUv);
    cddm->release(cddm);

    /*numGrids= dm->getNumGrids(dm);*/ /*UNUSED*/
    gridSize= dm->getGridSize(dm);
    gridData= dm->getGridData(dm);
    gridOffset= dm->getGridOffset(dm);
    subGridData= subdm->getGridData(subdm);

    /* dSkip maps the displacement-grid resolution onto the (possibly
     * coarser) ccg grid resolution */
    dGridSize= multires_side_tot[high_mmd.totlvl];
    dSkip= (dGridSize-1)/(gridSize-1);

#pragma omp parallel for private(i) if(me->totface*gridSize*gridSize*4 >= CCG_OMP_LIMIT)
    for(i = 0; i < me->totface; ++i) {
        const int numVerts= mface[i].v4 ? 4 : 3;
        MDisps *mdisp= &mdisps[i];
        int S, x, y, gIndex = gridOffset[i];

        for(S = 0; S < numVerts; ++S, ++gIndex) {
            DMGridData *grid= gridData[gIndex];
            DMGridData *subgrid= subGridData[gIndex];
            float (*dispgrid)[3]= &mdisp->disps[S*dGridSize*dGridSize];

            for(y = 0; y < gridSize; y++) {
                for(x = 0; x < gridSize; x++) {
                    float *co= grid[x + y*gridSize].co;
                    float *sco= subgrid[x + y*gridSize].co;
                    float *no= grid[x + y*gridSize].no;
                    float *data= dispgrid[dGridSize*y*dSkip + x*dSkip];
                    float mat[3][3], tx[3], ty[3], disp[3];

                    /* construct tangent space matrix */
                    grid_tangent(gridSize, gIndex, x, y, 0, gridData, tx);
                    normalize_v3(tx);

                    grid_tangent(gridSize, gIndex, x, y, 1, gridData, ty);
                    normalize_v3(ty);

                    column_vectors_to_mat3(mat, tx, ty, no);

                    /* scale subgrid coord and calculate displacement.
                     * NOTE(review): mutates subdm's grid data in place via
                     * 'sco' — OK because subdm is released below */
                    mul_m3_v3(smat, sco);
                    sub_v3_v3v3(disp, sco, co);

                    /* convert difference to tangent space */
                    invert_m3(mat);
                    mul_v3_m3v3(data, mat, disp);
                }
            }
        }
    }

    dm->release(dm);
    subdm->release(subdm);
}
/* Infer the number of face corners (4 for quads, 3 for tris) from the
 * displacement count: scan levels from the highest supported (13) down
 * and return totdisp divided by the first grid size that divides it
 * evenly. Returns 0 if no level matches. */
int multires_mdisp_corners(MDisps *s)
{
    int lvl;

    for(lvl = 13; lvl > 0; --lvl) {
        const int side = (1 << (lvl - 1)) + 1;
        const int grid = side * side;

        if((s->totdisp % grid) == 0)
            return s->totdisp / grid;
    }

    return 0;
}
/* Rescale the multires displacements of 'ob' by the object's own scale. */
void multiresModifier_scale_disp(Scene *scene, Object *ob)
{
    float scale_mat[3][3];

    /* object's scale matrix */
    object_scale_to_mat3(ob, scale_mat);

    multires_apply_smat(scene, ob, scale_mat);
}
/* Prepare 'ob' for joining into 'to_ob': synchronize subdivision levels,
 * then rescale displacements by the relative object scale so the detail
 * survives the transform into to_ob's space. */
void multiresModifier_prepare_join(Scene *scene, Object *ob, Object *to_ob)
{
    float src_scale[3][3], dst_scale_inv[3][3], rel_scale[3][3];

    multires_sync_levels(scene, ob, to_ob);

    /* construct scale matrix for displacement */
    object_scale_to_mat3(to_ob, dst_scale_inv);
    invert_m3(dst_scale_inv);
    object_scale_to_mat3(ob, src_scale);
    mul_m3_m3m3(rel_scale, src_scale, dst_scale_inv);

    multires_apply_smat(scene, ob, rel_scale);
}
/* update multires data after topology changing: allocate displacement
 * grids for faces that lost theirs and resize grids whose corner count
 * changed (tri <-> quad). */
void multires_topology_changed(Scene *scene, Object *ob)
{
    Mesh *me= (Mesh*)ob->data;
    MDisps *mdisp= NULL, *cur= NULL;
    int i, grid= 0, corners;
    MultiresModifierData *mmd= get_multires_modifier(scene, ob, 1);

    if(mmd)
        multires_set_tot_mdisps(me, mmd->totlvl);

    CustomData_external_read(&me->fdata, &me->id, CD_MASK_MDISPS, me->totface);
    mdisp= CustomData_get_layer(&me->fdata, CD_MDISPS);

    if(!mdisp) return;

    cur= mdisp;

    /* Find the first face that still has displacement data and use it to
     * derive the per-corner grid size used for newly allocated faces.
     * BUGFIX: the scan previously tested 'mdisp' (i.e. always face 0)
     * instead of the iterator 'cur', so 'grid' stayed 0 whenever face 0
     * had no displacements even though later faces did. */
    for(i = 0; i < me->totface; i++, cur++) {
        if(cur->totdisp) {
            corners= multires_mdisp_corners(cur);
            grid= cur->totdisp / corners;

            break;
        }
    }

    for(i = 0; i < me->totface; i++, mdisp++) {
        int nvert= me->mface[i].v4 ? 4 : 3;

        /* allocate memory for mdisp, the whole disp layer would be erased otherwise */
        if(!mdisp->totdisp || !mdisp->disps) {
            if(grid) {
                mdisp->totdisp= nvert*grid;
                mdisp->disps= MEM_callocN(mdisp->totdisp*sizeof(float)*3, "mdisp topology");
            }

            continue;
        }

        /* corner count changed (e.g. tri <-> quad): resize the grid set;
         * the face's old displacement data is discarded */
        corners= multires_mdisp_corners(mdisp);

        if(corners!=nvert) {
            mdisp->totdisp= (mdisp->totdisp/corners)*nvert;

            if(mdisp->disps)
                MEM_freeN(mdisp->disps);

            mdisp->disps= MEM_callocN(mdisp->totdisp*sizeof(float)*3, "mdisp topology");
        }
    }
}
/* makes displacement along grid boundary symmetrical: for each pair of
 * per-corner grids that share a boundary row, average the two stored
 * displacements (rotating the tangential x/y components 90 degrees into
 * the neighbour grid's frame) and write the averaged value back to both
 * grids. */
void multires_mdisp_smooth_bounds(MDisps *disps)
{
    int x, y, side, S, corners;
    float (*out)[3];

    corners = multires_mdisp_corners(disps);
    side = sqrt(disps->totdisp / corners);

    out = disps->disps;
    for(S = 0; S < corners; S++) {
        for(y = 0; y < side; ++y) {
            for(x = 0; x < side; ++x, ++out) {
                float (*dispgrid)[3];
                float *data;

                /* only boundary samples (x == 0 or y == 0) are processed */
                if(x != 0 && y != 0) continue;

                if(corners == 4) {
                    if(S == 0) {
                        if(y == 0) {
                            /* edge shared with grid 1 */
                            dispgrid = &disps->disps[1*side*side];
                            data = dispgrid[side * x + 0];

                            (*out)[0] = (*out)[0] + data[1];
                            (*out)[1] = (*out)[1] - data[0];
                            (*out)[2] = (*out)[2] + data[2];

                            mul_v3_fl(*out, 0.5);

                            data[0] = -(*out)[1];
                            data[1] = (*out)[0];
                            data[2] = (*out)[2];
                        } else if (x == 0) {
                            /* edge shared with grid 3 */
                            dispgrid = &disps->disps[3 * side * side];
                            data = dispgrid[side * 0 + y];

                            (*out)[0] = (*out)[0] - data[1];
                            (*out)[1] = (*out)[1] + data[0];
                            (*out)[2] = (*out)[2] + data[2];

                            mul_v3_fl(*out, 0.5);

                            data[0] = (*out)[1];
                            data[1] = -(*out)[0];
                            data[2] = (*out)[2];
                        }
                    } else if (S == 2) {
                        if(y == 0) {
                            /* edge shared with grid 3 */
                            dispgrid = &disps->disps[3 * side * side];
                            data = dispgrid[side * x + 0];

                            (*out)[0] = (*out)[0] + data[1];
                            (*out)[1] = (*out)[1] - data[0];
                            (*out)[2] = (*out)[2] + data[2];

                            mul_v3_fl(*out, 0.5);

                            data[0] = -(*out)[1];
                            data[1] = (*out)[0];
                            data[2] = (*out)[2];
                        } else if(x == 0) {
                            /* edge shared with grid 1 */
                            dispgrid = &disps->disps[1 * side * side];
                            data = dispgrid[side * 0 + y];

                            (*out)[0] = (*out)[0] - data[1];
                            (*out)[1] = (*out)[1] + data[0];
                            (*out)[2] = (*out)[2] + data[2];

                            mul_v3_fl(*out, 0.5);

                            data[0] = (*out)[1];
                            data[1] = -(*out)[0];
                            data[2] = (*out)[2];
                        }
                    }
                } else if (corners == 3) {
                    if(S == 0) {
                        if(y == 0) {
                            /* edge shared with grid 1 */
                            dispgrid = &disps->disps[1*side*side];
                            data = dispgrid[side * x + 0];

                            (*out)[0] = (*out)[0] + data[1];
                            (*out)[1] = (*out)[1] - data[0];
                            (*out)[2] = (*out)[2] + data[2];

                            mul_v3_fl(*out, 0.5);

                            data[0] = -(*out)[1];
                            data[1] = (*out)[0];
                            data[2] = (*out)[2];
                        } else if (x == 0) {
                            /* edge shared with grid 2 */
                            dispgrid = &disps->disps[2 * side * side];
                            data = dispgrid[side * 0 + y];

                            (*out)[0] = (*out)[0] - data[1];
                            (*out)[1] = (*out)[1] + data[0];
                            (*out)[2] = (*out)[2] + data[2];

                            mul_v3_fl(*out, 0.5);

                            data[0] = (*out)[1];
                            data[1] = -(*out)[0];
                            data[2] = (*out)[2];
                        }
                    } else if (S == 2) {
                        if(x == 0) {
                            /* edge shared with grid 1 */
                            dispgrid = &disps->disps[1 * side * side];
                            data = dispgrid[side * 0 + y];

                            (*out)[0] = (*out)[0] - data[1];
                            (*out)[1] = (*out)[1] + data[0];
                            (*out)[2] = (*out)[2] + data[2];

                            mul_v3_fl(*out, 0.5);

                            data[0] = (*out)[1];
                            data[1] = -(*out)[0];
                            data[2] = (*out)[2];
                        }
                    }
                }
            }
        }
    }
}
/***************** Multires interpolation stuff *****************/
/* Fill 'crn' with the UV-space quad of each of the three corners of a
 * triangular face grid (face_side = samples per side of the face).
 * All three corner quads share the face midpoint as their first vertex. */
static void mdisp_get_crn_rect(int face_side, float crn[3][4][2])
{
	const float offset = face_side*0.5f - 0.5f;
	const float mid_u = offset * 4 / 3;
	const float mid_v = offset * 2 / 3;
	int S, v;

	const float quads[3][4][2] = {
		/* corner 0 */
		{{mid_u, mid_v}, {offset, 0},          {0, 0},                   {offset, offset}},
		/* corner 1 */
		{{mid_u, mid_v}, {offset * 2, offset}, {offset * 2, 0},          {offset, 0}},
		/* corner 2 */
		{{mid_u, mid_v}, {offset, offset},     {offset * 2, offset * 2}, {offset * 2, offset}},
	};

	for(S = 0; S < 3; ++S) {
		for(v = 0; v < 4; ++v) {
			crn[S][v][0] = quads[S][v][0];
			crn[S][v][1] = quads[S][v][1];
		}
	}
}
/* Return 1 when the 2d point 'p' lies inside the (possibly skewed) quad
 * 'crn', 0 otherwise: p must be on the same side as the opposite corner
 * with respect to both edge directions leaving crn[0]. */
static int mdisp_pt_in_crn(float p[2], float crn[4][2])
{
	float edge[2][2];
	float rel[2][2];

	sub_v2_v2v2(edge[0], crn[1], crn[0]);
	sub_v2_v2v2(edge[1], crn[3], crn[0]);
	sub_v2_v2v2(rel[0], p, crn[0]);
	sub_v2_v2v2(rel[1], crn[2], crn[0]);

	return !(cross_v2v2(rel[0], edge[0]) * cross_v2v2(rel[1], edge[0]) < 0 ||
	         cross_v2v2(rel[0], edge[1]) * cross_v2v2(rel[1], edge[1]) < 0);
}
/* Inverse bilinear interpolation along one axis: given a face UV (u, v)
 * and a corner quad (v1..v4), solve A*t^2 + B*t + C = 0 for the
 * interpolation parameter and store the larger root in *x (falls back to
 * the linear solution when A is zero). */
static void face_to_crn_interp(float u, float v, float v1[2], float v2[2], float v3[2], float v4[2], float *x)
{
	float A = (v4[1]-v3[1])*v2[0]+(-v4[1]+v3[1])*v1[0]+(-v2[1]+v1[1])*v4[0]+(v2[1]-v1[1])*v3[0];
	float B = (v3[1]-v)*v2[0]+(v4[1]-2*v3[1]+v)*v1[0]+(-v4[1]+v3[1]+v2[1]-v1[1])*u+(v4[0]-v3[0])*v-v1[1]*v4[0]+(-v2[1]+2*v1[1])*v3[0];
	float C = (v3[1]-v)*v1[0]+(-v3[1]+v1[1])*u+v3[0]*v-v1[1]*v3[0];

	if(A == 0) {
		/* degenerate quadratic: single linear root */
		*x = -C / B;
	}
	else {
		float disc = B * B - 4 * A * C;
		float root_lo = (-B - sqrtf(disc)) / (2 * A);
		float root_hi = (-B + sqrtf(disc)) / (2 * A);

		*x = maxf(root_lo, root_hi);
	}
}
/* Convert a per-corner grid coordinate (S, x, y) back to a per-face UV
 * coordinate (*u, *v). Quads use a simple per-corner rotation; tris
 * interpolate inside the corner rects built by mdisp_get_crn_rect. */
void mdisp_rot_crn_to_face(const int S, const int corners, const int face_side, const float x, const float y, float *u, float *v)
{
	const float offset = face_side*0.5f - 0.5f;

	if(corners == 4) {
		switch(S) {
			case 0: *u= offset - y; *v = offset - x; break;
			case 1: *u= offset + x; *v = offset - y; break;
			case 2: *u= offset + y; *v = offset + x; break;
			case 3: *u= offset - x; *v = offset + y; break;
		}
	}
	else {
		float crn[3][4][2], vec[4][2];
		float p[2];

		mdisp_get_crn_rect(face_side, crn);

		/* the face point is the intersection of the two iso-parameter
		 * segments inside the corner quad */
		interp_v2_v2v2(vec[0], crn[S][0], crn[S][1], x / offset);
		interp_v2_v2v2(vec[1], crn[S][3], crn[S][2], x / offset);
		interp_v2_v2v2(vec[2], crn[S][0], crn[S][3], y / offset);
		interp_v2_v2v2(vec[3], crn[S][1], crn[S][2], y / offset);

		isect_seg_seg_v2_point(vec[0], vec[1], vec[2], vec[3], p);

		(*u) = p[0];
		(*v) = p[1];
	}
}
/* Find per-corner coordinate with given per-face UV coord: returns the
 * corner index S the point falls in and writes the local per-corner grid
 * coordinate to (*x, *y). */
int mdisp_rot_face_to_crn(const int corners, const int face_side, const float u, const float v, float *x, float *y)
{
	const float offset = face_side*0.5f - 0.5f;
	int S = 0;

	if (corners == 4) {
		/* pick the quadrant and rotate the UV into that corner's frame */
		if(u <= offset && v <= offset) {
			S = 0;
			*y = offset - u;
			*x = offset - v;
		}
		else if(u > offset && v <= offset) {
			S = 1;
			*x = u - offset;
			*y = offset - v;
		}
		else if(u > offset && v > offset) {
			S = 2;
			*y = u - offset;
			*x = v - offset;
		}
		else if(u <= offset && v >= offset) {
			S = 3;
			*x= offset - u;
			*y = v - offset;
		}
	}
	else {
		int grid_size = offset;
		float w = (face_side - 1) - u - v;
		float wa, wb;

		/* choose the corner with the dominant barycentric weight */
		if (u >= v && u >= w) { S = 0; wa= w; wb= v; }
		else if (v >= u && v >= w) { S = 1; wa = u; wb = w; }
		else { S = 2; wa = v; wb = u; }

		wa /= (face_side-1);
		wb /= (face_side-1);

		*x = (1-(2*wa)/(1-wb)) * grid_size;
		*y = (1-(2*wb)/(1-wa)) * grid_size;
	}

	return S;
}
/* Find per-corner coordinate with given per-face UV coord.
   Practically the same as mdisp_rot_face_to_crn, but assumes a different
   coordinate system for triangles which is optimized for MDISP layer
   interpolation:

   v
   ^
   |\
   | \
   |  \
   |___\___> u

   Returns the corner index and writes the per-corner coordinate to
   (*x, *y). */
int mdisp_rot_face_to_quad_crn(const int corners, const int face_side, const float u, const float v, float *x, float *y)
{
	const float offset = face_side*0.5f - 0.5f;
	int S = 0;

	if (corners == 4) {
		if(u <= offset && v <= offset) S = 0;
		else if(u > offset && v <= offset) S = 1;
		else if(u > offset && v > offset) S = 2;
		else if(u <= offset && v >= offset) S = 3;

		if(S == 0) {
			*y = offset - u;
			*x = offset - v;
		} else if(S == 1) {
			*x = u - offset;
			*y = offset - v;
		} else if(S == 2) {
			*y = u - offset;
			*x = v - offset;
		} else if(S == 3) {
			*x= offset - u;
			*y = v - offset;
		}
	} else {
		float crn[3][4][2];
		float p[2] = {u, v};

		mdisp_get_crn_rect(face_side, crn);

		/* find which of the three corner quads contains the point */
		for (S = 0; S < 3; ++S) {
			if (mdisp_pt_in_crn(p, crn[S]))
				break;
		}

		/* BUGFIX: if rounding error leaves the point outside all three
		   corner quads, the loop finishes with S == 3 and both crn[S]
		   below and the index returned to the caller read out of bounds
		   (undefined behavior); clamp to the last valid corner instead. */
		if (S == 3)
			S = 2;

		/* inverse bilinear interpolation along both parameter axes */
		face_to_crn_interp(u, v, crn[S][0], crn[S][1], crn[S][3], crn[S][2], &p[0]);
		face_to_crn_interp(u, v, crn[S][0], crn[S][3], crn[S][1], crn[S][2], &p[1]);

		*x = p[0] * offset;
		*y = p[1] * offset;
	}

	return S;
}
/* Interpolate the per-corner weight pairs 'crn_weight' at the per-corner
 * grid position (S, x, y): bilinear for quads, degenerate-quad scheme for
 * tris. The interpolated pair is written to (*u_r, *v_r). */
void mdisp_apply_weight(const int S, const int corners, int x, int y, const int face_side,
                        float crn_weight[4][2], float *u_r, float *v_r)
{
	float fu, fv, tx, ty;
	float bot[2], top[2], res[2];

	/* face UV of this grid sample */
	mdisp_rot_crn_to_face(S, corners, face_side, x, y, &fu, &fv);

	if(corners == 4) {
		tx = fu / (face_side - 1);
		ty = fv / (face_side - 1);

		/* interpolate along both u-edges, then across in v */
		bot[0] = crn_weight[0][0] * (1 - tx) + crn_weight[1][0] * tx;
		bot[1] = crn_weight[0][1] * (1 - tx) + crn_weight[1][1] * tx;
		top[0] = crn_weight[3][0] * (1 - tx) + crn_weight[2][0] * tx;
		top[1] = crn_weight[3][1] * (1 - tx) + crn_weight[2][1] * tx;

		res[0] = bot[0] * (1 - ty) + top[0] * ty;
		res[1] = bot[1] * (1 - ty) + top[1] * ty;
	}
	else {
		ty = fv / (face_side - 1);
		if(fv == face_side - 1) tx = 1;
		else tx = 1 - (face_side - 1 - fu) / (face_side - 1 - fv);

		bot[0] = crn_weight[0][0] * (1 - tx) + crn_weight[1][0] * tx;
		bot[1] = crn_weight[0][1] * (1 - tx) + crn_weight[1][1] * tx;

		res[0] = bot[0] * (1 - ty) + crn_weight[2][0] * ty;
		res[1] = bot[1] * (1 - ty) + crn_weight[2][1] * ty;
	}

	*u_r = res[0];
	*v_r = res[1];
}
/* Re-express the tangential components of a quad displacement in new
 * tangent axes (axis_x, axis_y), e.g. when a face is flipped; the normal
 * component disp[2] is untouched. For tris the tangential components are
 * simply dropped. */
void mdisp_flip_disp(const int S, const int corners, const float axis_x[2], const float axis_y[2], float disp[3])
{
	if (corners == 4) {
		/* per-corner tangent basis directions for quad grids */
		static const float basis_x[4][2] = {{0, -1}, {1, 0}, {0, 1}, {-1, 0}};
		static const float basis_y[4][2] = {{-1, 0}, {0, -1}, {1, 0}, {0, 1}};

		float crn_x[2], crn_y[2];
		float vx[2], vy[2], coord[2];

		copy_v2_v2(crn_x, basis_x[S]);
		copy_v2_v2(crn_y, basis_y[S]);

		/* rebuild the 2d tangential vector, then project onto new axes */
		mul_v2_v2fl(vx, crn_x, disp[0]);
		mul_v2_v2fl(vy, crn_y, disp[1]);
		add_v2_v2v2(coord, vx, vy);

		project_v2_v2v2(vx, coord, axis_x);
		project_v2_v2v2(vy, coord, axis_y);

		disp[0] = len_v2(vx);
		disp[1] = len_v2(vy);

		if(dot_v2v2(vx, axis_x) < 0)
			disp[0] = -disp[0];

		if(dot_v2v2(vy, axis_y) < 0)
			disp[1] = -disp[1];
	} else {
		/* XXX: it was very overhead code to support displacement flipping
		   for case of tris without visible profit.
		   Maybe its not really big limitation? for now? (nazgul) */
		disp[0] = 0;
		disp[1] = 0;
	}
}
/* Join two triangular displacements into one quad
   Corners mapping:

   2 -------- 3
   | \  tri2  |
   |    \     |
   | tri1  \  |
   0 -------- 1 */
void mdisp_join_tris(MDisps *dst, MDisps *tri1, MDisps *tri2)
{
    int side, st;
    int S, x, y, crn;
    float face_u, face_v, crn_u, crn_v;
    float (*out)[3];
    MDisps *src;

    if(dst->disps)
        MEM_freeN(dst->disps);

    /* per-corner grid side of the source tris; st = samples per side of
     * the combined quad face */
    side = sqrt(tri1->totdisp / 3);
    st = (side<<1)-1;

    dst->totdisp = 4 * side * side;
    out = dst->disps = MEM_callocN(3*dst->totdisp*sizeof(float), "join disps");

    for(S = 0; S < 4; S++)
        for(y = 0; y < side; ++y)
            for(x = 0; x < side; ++x, ++out) {
                /* map the quad-grid sample to face UV and pick the source
                 * triangle covering it (mirror across the diagonal for
                 * tri2) */
                mdisp_rot_crn_to_face(S, 4, st, x, y, &face_u, &face_v);
                face_u = st - 1 - face_u;

                if(face_v > face_u) {
                    src = tri2;
                    face_u = st - 1 - face_u;
                    face_v = st - 1 - face_v;
                } else src = tri1;

                crn = mdisp_rot_face_to_quad_crn(3, st, face_u, face_v, &crn_u, &crn_v);

                old_mdisps_bilinear((*out), &src->disps[crn*side*side], side, crn_u, crn_v);
                /* only the normal component is kept: tangential components
                 * are zeroed — presumably they cannot be remapped reliably
                 * between tri and quad layouts (see mdisp_flip_disp) */
                (*out)[0] = 0;
                (*out)[1] = 0;
            }
}
|
TemporalReflectionPadding.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/TemporalReflectionPadding.c"
#else
/* Copy one frame of 'nslices' rows, reflecting the input across its
 * boundaries to fill pad_l columns on the left and pad_r on the right. */
static void THNN_(TemporalReflectionPadding_updateOutput_frame)(
  real *input_p, real *output_p,
  long nslices,
  long iwidth,
  long owidth,
  int pad_l, int pad_r)
{
  /* first valid input/output column (these differ only for negative pads) */
  int iStartX = fmax(0, -pad_l);
  int oStartX = fmax(0, pad_l);

  long slice, src_x;
#pragma omp parallel for private(slice, src_x)
  for (slice = 0; slice < nslices; slice++)
  {
    long col;
    for (col = 0; col < owidth; col++) {
      /* reflect out-of-range output columns back into [pad_l, iwidth + pad_l) */
      if (col < pad_l)
        src_x = pad_l * 2 - col;
      else if (col < iwidth + pad_l)
        src_x = col;
      else
        src_x = (iwidth + pad_l - 1) * 2 - col;
      src_x = src_x - oStartX + iStartX;

      output_p[slice * owidth + col] = input_p[slice * iwidth + src_x];
    }
  }
}
/* Forward pass: resizes 'output' to (nbatch x) nslices x owidth, where
 * owidth = iwidth + pad_l + pad_r, and fills it with the
 * reflection-padded input. Accepts 2D (nslices x iwidth) or 3D batched
 * input; the batch loop is parallelized with OpenMP. */
void THNN_(TemporalReflectionPadding_updateOutput)(THNNState *state,
                                                   THTensor *input,
                                                   THTensor *output,
                                                   int pad_l, int pad_r)
{
  int dimw = 1;
  int dimslices = 0;
  long nbatch = 1;
  long nslices;
  long iwidth;
  long owidth;
  real *input_data;
  real *output_data;

  THNN_ARGCHECK(input->nDimension == 2 || input->nDimension == 3, 2, input,
                "2D or 3D (batch mode) tensor expected for input, but got: %s");

  if (input->nDimension == 3)
  {
    nbatch = input->size[0];
    dimw++;
    dimslices++;
  }

  /* input size */
  nslices = input->size[dimslices];
  iwidth = input->size[dimw];

  /* reflection requires each pad to be strictly smaller than the input */
  THArgCheck(pad_l < iwidth && pad_r < iwidth, 4,
             "Padding size should be less than the corresponding input dimension, "
             "but got: padding (%d, %d) at dimension %d of input %s",
             pad_l, pad_r, dimw, _THSizeDesc(input->size, input->nDimension).str);

  /* output size */
  owidth = iwidth + pad_l + pad_r;

  THArgCheck(owidth >= 1 , 2,
             "input (W: %d)is too small."
             " Calculated output W: %d",
             iwidth, owidth);

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  /* resize output */
  if (input->nDimension == 2)
  {
    THTensor_(resize2d)(output, nslices, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);

    THNN_(TemporalReflectionPadding_updateOutput_frame)(input_data, output_data,
                                                        nslices,
                                                        iwidth,
                                                        owidth,
                                                        pad_l, pad_r);
  }
  else
  {
    long p;

    THTensor_(resize3d)(output, nbatch, nslices, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);

#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++)
    {
      THNN_(TemporalReflectionPadding_updateOutput_frame)(
        input_data+p*nslices*iwidth,
        output_data+p*nslices*owidth,
        nslices,
        iwidth,
        owidth,
        pad_l, pad_r);
    }
  }

  /* cleanup: drop the contiguous copy made above */
  THTensor_(free)(input);
}
/* Accumulate one frame of gradients: each output column's gradient is
 * added onto the input column it was reflected from in the forward pass
 * (reflected positions therefore receive multiple contributions). */
static void THNN_(TemporalReflectionPadding_updateGradInput_frame)(
  real *ginput_p, real *goutput_p,
  long nslices,
  long iwidth,
  long owidth,
  int pad_l, int pad_r)
{
  /* first valid input/output column (these differ only for negative pads) */
  int iStartX = fmax(0, -pad_l);
  int oStartX = fmax(0, pad_l);

  long slice, src_x;
#pragma omp parallel for private(slice, src_x)
  for (slice = 0; slice < nslices; slice++)
  {
    long col;
    for (col = 0; col < owidth; col++) {
      /* same reflection index computation as the forward frame */
      if (col < pad_l)
        src_x = pad_l * 2 - col;
      else if (col < iwidth + pad_l)
        src_x = col;
      else
        src_x = (iwidth + pad_l - 1) * 2 - col;
      src_x = src_x - oStartX + iStartX;

      ginput_p[slice * iwidth + src_x] += goutput_p[slice * owidth + col];
    }
  }
}
/* Backward pass: zeroes gradInput (resized to match input) and
 * scatter-adds each gradOutput column back onto the input column it was
 * copied from in the forward pass. */
void THNN_(TemporalReflectionPadding_updateGradInput)(THNNState *state,
                                                      THTensor *input,
                                                      THTensor *gradOutput,
                                                      THTensor *gradInput,
                                                      int pad_l, int pad_r)
{
  int dimw = 1;
  int dimslices = 0;
  long nbatch = 1;
  long nslices;
  long iwidth;
  long owidth;

  /* NOTE(review): unlike updateOutput there is no THNN_ARGCHECK on
     input->nDimension here — presumably the shapes were already validated
     by the forward pass; confirm against the caller */
  if (input->nDimension == 3)
  {
    nbatch = input->size[0];
    dimw++;
    dimslices++;
  }

  /* sizes */
  nslices = input->size[dimslices];
  iwidth = input->size[dimw];
  owidth = iwidth + pad_l + pad_r;

  THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3,
             "gradOutput width unexpected. Expected: %d, Got: %d",
             owidth, THTensor_(size)(gradOutput, dimw));

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  /* backprop */
  if (input->nDimension == 2) {
    THNN_(TemporalReflectionPadding_updateGradInput_frame)(
      THTensor_(data)(gradInput),
      THTensor_(data)(gradOutput),
      nslices,
      iwidth,
      owidth,
      pad_l, pad_r);
  } else {
    long p;
#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++) {
      THNN_(TemporalReflectionPadding_updateGradInput_frame)(
        THTensor_(data)(gradInput) + p * nslices * iwidth,
        THTensor_(data)(gradOutput) + p * nslices * owidth,
        nslices,
        iwidth,
        owidth,
        pad_l, pad_r);
    }
  }

  /* cleanup: drop the contiguous copy made above */
  THTensor_(free)(gradOutput);
}
#endif
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include <limits>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda/utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
/*! \brief pi, visible to both host and device code; when compiling device
 *  code the copy is placed in CUDA constant memory. */
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif

/*! \brief recommended number of worker threads for a problem of size N;
 *  specialized below for cpu and (under CUDA) for gpu. */
template<typename xpu>
int get_num_threads(const int N);

#ifdef __CUDACC__
/*! \brief grid-stride loop over n elements inside a CUDA kernel */
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
      i < (n); \
      i += blockDim.x * gridDim.x)

/*! \brief properties of the currently selected CUDA device */
inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}

/*!
 * \brief Get the number of blocks for cuda kernel given N
 */
inline int cuda_get_num_blocks(uint64_t N) {
  using namespace mshadow::cuda;
  // ceil(N / kBaseThreadNum), checked against the signed 32-bit grid limit
  uint64_t num_blocks = (N + kBaseThreadNum - 1) / kBaseThreadNum;
  CHECK_LE(num_blocks, std::numeric_limits<int32_t>::max());
  return static_cast<int>(num_blocks);
}

/*! \brief gpu specialization: one thread per element, rounded up to whole
 *  thread blocks */
template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif  // __CUDACC__

/*! \brief cpu specialization: the recommended OpenMP thread count
 *  (independent of N, which is unused here) */
template<>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch: binds ReqType to a compile-time
 *  constant and runs the body; kNullOp (and unknown values) run nothing,
 *  and kWriteInplace is handled as kWriteTo. */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...)  \
  switch (req) {                                    \
  case kNullOp:                                     \
    break;                                          \
  case kWriteInplace:                               \
  case kWriteTo:                                    \
    {                                               \
      const OpReqType ReqType = kWriteTo;           \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case kAddTo:                                      \
    {                                               \
      const OpReqType ReqType = kAddTo;             \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  default:                                          \
    break;                                          \
  }

/*! \brief operator request type switch: same as MXNET_ASSIGN_REQ_SWITCH,
 *  except the body is also executed for kNullOp (with ReqType = kNullOp) */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...)  \
  switch (req) {                                  \
  case kNullOp:                                   \
    {                                             \
      const OpReqType ReqType = kNullOp;          \
      {__VA_ARGS__}                               \
    }                                             \
    break;                                        \
  case kWriteInplace:                             \
  case kWriteTo:                                  \
    {                                             \
      const OpReqType ReqType = kWriteTo;         \
      {__VA_ARGS__}                               \
    }                                             \
    break;                                        \
  case kAddTo:                                    \
    {                                             \
      const OpReqType ReqType = kAddTo;           \
      {__VA_ARGS__}                               \
    }                                             \
    break;                                        \
  default:                                        \
    break;                                        \
  }
/*! \brief dispatch on a runtime ndim in [0, 5], binding the compile-time
 *  constant 'ndim' for the body; ndim == 0 runs nothing, larger values are
 *  fatal. BUGFIX: the fatal message was missing the separating space and
 *  printed e.g. "ndim=6too large". */
#define MXNET_NDIM_SWITCH(NDim, ndim, ...)           \
  if (NDim == 0) {                                   \
  } else if (NDim == 1) {                            \
    const int ndim = 1;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 2) {                            \
    const int ndim = 2;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 3) {                            \
    const int ndim = 3;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 4) {                            \
    const int ndim = 4;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 5) {                            \
    const int ndim = 5;                              \
    {__VA_ARGS__}                                    \
  } else {                                           \
    LOG(FATAL) << "ndim=" << NDim << " too large ";  \
  }

/*! \brief extended variant of MXNET_NDIM_SWITCH supporting ndim up to 10 */
#define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...)        \
  if (NDim == 0) {                                   \
  } else if (NDim == 1) {                            \
    const int ndim = 1;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 2) {                            \
    const int ndim = 2;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 3) {                            \
    const int ndim = 3;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 4) {                            \
    const int ndim = 4;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 5) {                            \
    const int ndim = 5;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 6) {                            \
    const int ndim = 6;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 7) {                            \
    const int ndim = 7;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 8) {                            \
    const int ndim = 8;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 9) {                            \
    const int ndim = 9;                              \
    {__VA_ARGS__}                                    \
  } else if (NDim == 10) {                           \
    const int ndim = 10;                             \
    {__VA_ARGS__}                                    \
  } else {                                           \
    LOG(FATAL) << "ndim=" << NDim << " too large ";  \
  }
/*! \brief dtype switch binding DType; fatal for int8/uint8 inputs.
 *  float16 and bfloat16 both map to mshadow::half::half_t here. */
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...)  \
  switch (type) {                                    \
  case mshadow::kFloat32:                            \
    {                                                \
      typedef float DType;                           \
      {__VA_ARGS__}                                  \
    }                                                \
    break;                                           \
  case mshadow::kFloat64:                            \
    {                                                \
      typedef double DType;                          \
      {__VA_ARGS__}                                  \
    }                                                \
    break;                                           \
  case mshadow::kFloat16:                            \
  case mshadow::kBfloat16:                           \
    {                                                \
      typedef mshadow::half::half_t DType;           \
      {__VA_ARGS__}                                  \
    }                                                \
    break;                                           \
  case mshadow::kUint8:                              \
    LOG(FATAL) << "This operation does not "         \
                  "support int8 or uint8";           \
    break;                                           \
  case mshadow::kInt8:                               \
    LOG(FATAL) << "This operation does not "         \
                  "support int8 or uint8";           \
    break;                                           \
  case mshadow::kInt32:                              \
    {                                                \
      typedef int32_t DType;                         \
      {__VA_ARGS__}                                  \
    }                                                \
    break;                                           \
  case mshadow::kInt64:                              \
    {                                                \
      typedef int64_t DType;                         \
      {__VA_ARGS__}                                  \
    }                                                \
    break;                                           \
  default:                                           \
    LOG(FATAL) << "Unknown type enum " << type;      \
  }

/*! \brief dtype switch binding DType; fatal for float16 inputs. */
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...)  \
  switch (type) {                                       \
  case mshadow::kFloat32:                               \
    {                                                   \
      typedef float DType;                              \
      {__VA_ARGS__}                                     \
    }                                                   \
    break;                                              \
  case mshadow::kFloat64:                               \
    {                                                   \
      typedef double DType;                             \
      {__VA_ARGS__}                                     \
    }                                                   \
    break;                                              \
  case mshadow::kFloat16:                               \
    LOG(FATAL) << "This operation does not "            \
                  "support float16";                    \
    break;                                              \
  case mshadow::kUint8:                                 \
    {                                                   \
      typedef uint8_t DType;                            \
      {__VA_ARGS__}                                     \
    }                                                   \
    break;                                              \
  case mshadow::kInt8:                                  \
    {                                                   \
      typedef int8_t DType;                             \
      {__VA_ARGS__}                                     \
    }                                                   \
    break;                                              \
  case mshadow::kInt32:                                 \
    {                                                   \
      typedef int32_t DType;                            \
      {__VA_ARGS__}                                     \
    }                                                   \
    break;                                              \
  case mshadow::kInt64:                                 \
    {                                                   \
      typedef int64_t DType;                            \
      {__VA_ARGS__}                                     \
    }                                                   \
    break;                                              \
  default:                                              \
    LOG(FATAL) << "Unknown type enum " << type;         \
  }
template <typename T>
struct AccType {
using type = T;
};
template <>
struct AccType<mshadow::half::half_t> {
using type = float;
};
/*!
 * \brief Floating-point-only dispatch that binds both the storage typedef
 *        `DType` and a wider accumulation typedef `AType`
 *        (float32 -> double, float64 -> double, float16 -> float).
 *        All integral and bool dtypes are rejected with LOG(FATAL).
 */
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) {                                          \
  case mshadow::kFloat32:                                  \
    {                                                      \
      typedef float DType;                                 \
      typedef double AType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat64:                                  \
    {                                                      \
      typedef double DType;                                \
      typedef double AType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat16:                                  \
    {                                                      \
      typedef mshadow::half::half_t DType;                 \
      typedef float AType;                                 \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kUint8:                                    \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "floating point types not uint8";      \
    }                                                      \
    break;                                                 \
  case mshadow::kInt8:                                     \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "floating point types not int8";       \
    }                                                      \
    break;                                                 \
  case mshadow::kInt32:                                    \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "floating point types, not int32";     \
    }                                                      \
    break;                                                 \
  case mshadow::kInt64:                                    \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "floating point types, not int64";     \
    }                                                      \
    break;                                                 \
  case mshadow::kBool:                                     \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "floating point types, not bool";      \
    }                                                      \
    break;                                                 \
  default:                                                 \
    LOG(FATAL) << "Unknown type enum " << type;            \
  }
/*!
 * \brief Dispatch binding storage typedef `DType` plus a wider accumulation
 *        typedef `AType` for every dtype including integers and bool:
 *        uint8 -> uint32, int8 -> int32, int32 -> int64, bool -> int64.
 *        NOTE(review): no kBfloat16 case — bfloat16 inputs hit the
 *        "Unknown type enum" fatal path; confirm intentional.
 */
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) {                                          \
  case mshadow::kFloat32:                                  \
    {                                                      \
      typedef float DType;                                 \
      typedef double AType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat64:                                  \
    {                                                      \
      typedef double DType;                                \
      typedef double AType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat16:                                  \
    {                                                      \
      typedef mshadow::half::half_t DType;                 \
      typedef float AType;                                 \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kUint8:                                    \
    {                                                      \
      typedef uint8_t DType;                               \
      typedef uint32_t AType;                              \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt8:                                     \
    {                                                      \
      typedef int8_t DType;                                \
      typedef int32_t AType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt32:                                    \
    {                                                      \
      typedef int32_t DType;                               \
      typedef int64_t AType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt64:                                    \
    {                                                      \
      typedef int64_t DType;                               \
      typedef int64_t AType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kBool:                                     \
    {                                                      \
      typedef bool DType;                                  \
      typedef int64_t AType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  default:                                                 \
    LOG(FATAL) << "Unknown type enum " << type;            \
  }
/*!
 * \brief Integer-only dtype dispatch: binds `DType` for uint8/int8/int32/
 *        int64/bool and runs the body; all floating-point dtypes are
 *        rejected with LOG(FATAL).
 */
#define MXNET_INT_TYPE_SWITCH(type, DType, ...)\
  switch (type) {                                          \
  case mshadow::kFloat32:                                  \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "integer types, not float32";          \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat64:                                  \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "integer types, not float64";          \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat16:                                  \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "integer types, not float16";          \
    }                                                      \
    break;                                                 \
  case mshadow::kUint8:                                    \
    {                                                      \
      typedef uint8_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt8:                                     \
    {                                                      \
      typedef int8_t DType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt32:                                    \
    {                                                      \
      typedef int32_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt64:                                    \
    {                                                      \
      typedef int64_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kBool:                                     \
    {                                                      \
      typedef bool DType;                                  \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  default:                                                 \
    LOG(FATAL) << "Unknown type enum " << type;            \
  }
/*!
 * \brief Narrow dispatch: binds `DType` only for int32 and int64
 *        (typical for index/shape tensors); every other dtype, including
 *        the smaller integer types and bool, is rejected with LOG(FATAL).
 */
#define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...)\
  switch (type) {                                          \
  case mshadow::kFloat32:                                  \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "integer types, not float32";          \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat64:                                  \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "integer types, not float64";          \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat16:                                  \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "integer types, not float16";          \
    }                                                      \
    break;                                                 \
  case mshadow::kUint8:                                    \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "integer types, not uint8";            \
    }                                                      \
    break;                                                 \
  case mshadow::kInt8:                                     \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "integer types, not int8";             \
    }                                                      \
    break;                                                 \
  case mshadow::kInt32:                                    \
    {                                                      \
      typedef int32_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt64:                                    \
    {                                                      \
      typedef int64_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kBool:                                     \
    {                                                      \
      LOG(FATAL) << "This operation only support "         \
                    "integer types, not bool";             \
    }                                                      \
    break;                                                 \
  default:                                                 \
    LOG(FATAL) << "Unknown type enum " << type;            \
  }
/*!
 * \brief Dispatch used on the model-loading path: binds `DType` for the
 *        loadable dtypes float32/float64/float16/uint8 only; anything else
 *        is reported as an invalid loading enum and aborts.
 */
#define MXNET_LOAD_TYPE_SWITCH(type, DType, ...)  \
  switch (type) {                                 \
  case mshadow::kFloat32:                         \
    {                                             \
      typedef float DType;                        \
      {__VA_ARGS__}                               \
    }                                             \
    break;                                        \
  case mshadow::kFloat64:                         \
    {                                             \
      typedef double DType;                       \
      {__VA_ARGS__}                               \
    }                                             \
    break;                                        \
  case mshadow::kFloat16:                         \
    {                                             \
      typedef mshadow::half::half_t DType;        \
      {__VA_ARGS__}                               \
    }                                             \
    break;                                        \
  case mshadow::kUint8:                           \
    {                                             \
      typedef uint8_t DType;                      \
      {__VA_ARGS__}                               \
    }                                             \
    break;                                        \
  default:                                        \
    LOG(FATAL) << "Invalid loading enum type " << type; \
  }
/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the lvalue to be assigned
 * \param req the assignment request (an OpReqType value):
 *        kNullOp -> no-op; kWriteTo/kWriteInplace -> plain assignment;
 *        kAddTo -> accumulate into out.
 * \param val the value to be assigned to out
 * note: unrecognized req values are silently ignored (default: break).
 */
#define KERNEL_ASSIGN(out, req, val)  \
  {                                   \
    switch (req) {                    \
      case kNullOp:                   \
        break;                        \
      case kWriteTo:                  \
      case kWriteInplace:             \
        (out) = (val);                \
        break;                        \
      case kAddTo:                    \
        (out) += (val);               \
        break;                        \
      default:                        \
        break;                        \
    }                                 \
  }
#define MXNET_ADD_ALL_TYPES \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64)
#define MXNET_ADD_ALL_TYPES_WITH_BOOL \
.add_enum("float32", mshadow::kFloat32) \
.add_enum("float64", mshadow::kFloat64) \
.add_enum("float16", mshadow::kFloat16) \
.add_enum("bfloat16", mshadow::kBfloat16) \
.add_enum("uint8", mshadow::kUint8) \
.add_enum("int8", mshadow::kInt8) \
.add_enum("int32", mshadow::kInt32) \
.add_enum("int64", mshadow::kInt64) \
.add_enum("bool", mshadow::kBool)
/* \brief Compute the row-major flattened index of `coord` within `shape`.
 * A coordinate that is >= its extent contributes 0 (matters for axes of
 * extent 1, e.g. broadcast inputs — presumably; confirm against callers). */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t flat = 0;
#pragma unroll
  for (int dim = 0; dim < ndim; ++dim) {
    const index_t c = (coord[dim] < shape[dim]) ? coord[dim] : 0;
    flat = flat * shape[dim] + c;
  }
  return flat;
}
/* Convert a row-major flattened index back into per-dimension coordinates
 * for the given shape, peeling off the fastest-varying dimension first. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> coord;
  index_t remaining = idx;
#pragma unroll
  for (int dim = ndim - 1; dim >= 0; --dim) {
    const index_t quot = remaining / shape[dim];
    coord[dim] = remaining - quot * shape[dim];  // remainder without a second div
    remaining = quot;
  }
  return coord;
}
/* Inner product of a coordinate vector with a stride vector, i.e. the
 * linear memory offset of `coord` under `stride`. */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t offset = 0;
#pragma unroll
  for (int dim = 0; dim < ndim; ++dim) {
    offset += coord[dim] * stride[dim];
  }
  return offset;
}
/* Fused unravel + dot: computes the strided offset corresponding to the
 * flattened row-major index `idx` without materializing the coordinates. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t offset = 0;
  index_t remaining = idx;
#pragma unroll
  for (int dim = ndim - 1; dim >= 0; --dim) {
    const index_t quot = remaining / shape[dim];
    offset += (remaining - quot * shape[dim]) * stride[dim];
    remaining = quot;
  }
  return offset;
}
/* Derive row-major strides from a shape. Dimensions of extent <= 1 get
 * stride 0 (so indexing along them always lands on the same element). */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t running = 1;
#pragma unroll
  for (int dim = ndim - 1; dim >= 0; --dim) {
    if (shape[dim] > 1) {
      stride[dim] = running;
    } else {
      stride[dim] = 0;
    }
    running *= shape[dim];
  }
  return stride;
}
/* Increment coordinates */
/* Advance `coord` by one step in row-major order (last dim fastest),
 * carrying into earlier dims whenever a dim reaches its extent.
 * Returns true while the result is still in bounds, i.e. until the carry
 * overflows dim 0. */
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim-1];
#pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}
/* Increment coordinates and modify index */
/* Same carry logic as inc(coord, shape) above, but additionally keeps the
 * flattened offset *idx consistent under `stride`: the fast-dim step adds
 * stride[ndim-1]; each carry subtracts the shape[i]*stride[i] accumulated
 * over dim i and adds one step of stride[i-1]. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
#pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}
/* Increment coordinates and modify index */
/* Two-offset variant of the inc() above: advances `coord` once and keeps
 * BOTH flattened offsets (*idx1 under stride1, *idx2 under stride2) in
 * sync, so a single coordinate walk can drive two differently-strided
 * tensors (e.g. input and output). */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
#pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}
/*!
 * \brief Simple copy data from one blob to another
 * \param s device stream on which the copy is issued
 * \param to Destination blob (its dtype drives the outer type switch)
 * \param from Source blob; must have the same element count and live on
 *        the same device (both enforced by CHECK_EQ below)
 * note: when dtypes match this is a raw mshadow::Copy; otherwise each
 * element is converted with expr::tcast<DType> (elementwise cast path).
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad: chain rule as
   *        input_grad = output_grad * GRAD_OP(args...)
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad, cast back to DType
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};
/*! \brief Binary op backward gradient OP wrapper (tuned)
 *  Identical to backward_grad but inherits `tunable`, which opts the op
 *  into the OMP auto-tuning Launch path of Kernel<OP, cpu>. */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 * Every Map overload reads element i from the input(s), forwards it
 * through OP::Map, and stores the result via KERNEL_ASSIGN, honoring the
 * compile-time request `req` (write / in-place write / accumulate / no-op).
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;
  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }
  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }
  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }
  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }
  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1,
                                  const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
  /* The overloads below use enable_if guards so the mixed-type variants
   * (bool/half/float/double outputs) never become ambiguous with the
   * same-type overloads above. */
  /*! \brief input is a tensor and the output is a boolean tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  /*! \brief inputs are two tensors with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is tensor and two scalar value with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
  /*! \brief input is two tensors with different type and with a boolean output tensor */
  template<typename LType, typename RType,
           typename std::enable_if<!std::is_same<LType, RType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const LType *lhs, const RType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief inputs are two tensors with a half_t output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i,
                                  mshadow::half::half_t *out,
                                  const DType *lhs,
                                  const mshadow::half::half_t *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief inputs are two tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief inputs are two tensors with a double output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_same<DType, float>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief inputs are two tensors with a half_t output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i,
                                  mshadow::half::half_t *out,
                                  const DType *lhs,
                                  const mshadow::half::half_t value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }
  /*! \brief inputs are two tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }
  /*! \brief inputs are two tensors with a double output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_same<DType, float>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }
  /*! \brief inputs are two tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is a tensor and a scalar value with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};
/*! \brief Device-dispatched kernel launcher; specializations for cpu (and,
 *  under __CUDACC__, gpu) are provided below. */
template<typename OP, typename xpu>
struct Kernel;
/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // NOTE(review): N (size_t) is cast to index_t for the OMP loop —
      // confirm N always fits in index_t.
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }
  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }
  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   *        (the first of which is typically the destination pointer whose
   *        type supplies DType)
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // NOTE(review): the loops invoke OP::Map; PRIMITIVE_OP is consulted only
    // by tuned_op<>::UseOMP for the threading decision — confirm intended.
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }
  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // Split [0, N) into ceil(N / omp_threads)-sized contiguous chunks;
      // each OP::Map call receives (start, length) of one chunk.
      const auto length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }
  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }
  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};
#ifdef __CUDACC__
/*! \brief Generic elementwise GPU kernel: grid-stride loop dispatching
 *  each index i to OP::Map(i, args...). */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}
/*! \brief Variant that calls OP::Map(i, 1, args...) — a chunk of length 1
 *  per index, matching the (start, length) contract of LaunchEx. */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}
/*! \brief GPU specialization of the kernel launcher.
 *  NOTE(review): N is int here (vs size_t on the CPU path) — confirm
 *  element counts never exceed INT_MAX on this path. */
template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel; grid size is capped at kMaxGridNum and the
   *  grid-stride loop inside the kernel covers any remainder. */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }
  /*! \brief LaunchEx counterpart using the (start, length) Map contract. */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__
/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate written (after DType conversion) to out[i]
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Boolean immediate written (after DType conversion) to out[i]
 */
template<bool val>
struct set_to_bool : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  // NOTE(review): returns int (0/1) rather than bool; values match after
  // the DType conversion in callers, but `bool` would be more precise.
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
 * \brief Special-case kernel shortcut for setting to true and false
 */
using set_true = set_to_bool<true>;
using set_false = set_to_bool<false>;
} // namespace mxnet_op
template<typename xpu, typename DType>
DType *AllocMemory(const OpContext& ctx, index_t num_items, size_t extraStorageBytes = 0) {
const auto workspace_total_bytes = num_items * sizeof(DType) + extraStorageBytes;
mshadow::Tensor<xpu, 1, uint8_t> workspace =
ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
mshadow::Shape1(workspace_total_bytes), ctx.get_stream<xpu>());
return reinterpret_cast<DType *>(workspace.dptr_);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
/* ==== end of mxnet_op.h excerpt; following file: 3d25pt_var.lbpar.c ==== */
#include <omp.h>
#include <math.h>
/* Ceiling/floor integer division (via double) and min/max helpers used by
 * the PLUTO/CLooG-generated tiled loop bounds below. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
/* Fully parenthesize the selected operands: the original expanded to
 * `((a) > (b) ? a : b)`, leaving the result arms unprotected against
 * low-precedence operators inside the arguments. Note both macros still
 * evaluate the chosen argument twice — do not pass expressions with
 * side effects. */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * Unlike the classic glibc-manual version this does NOT modify *y:
 * the carry normalization is performed on a local copy, so callers may
 * safely reuse both operands afterwards.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  struct timeval yy = *y;  /* local copy; keeps the caller's Y intact */
  /* Perform the carry for the later subtraction. */
  if (x->tv_usec < yy.tv_usec)
  {
    int nsec = (yy.tv_usec - x->tv_usec) / 1000000 + 1;
    yy.tv_usec -= 1000000 * nsec;
    yy.tv_sec += nsec;
  }
  if (x->tv_usec - yy.tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - yy.tv_usec) / 1000000;
    yy.tv_usec += 1000000 * nsec;
    yy.tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - yy.tv_sec;
  result->tv_usec = x->tv_usec - yy.tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < yy.tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 16;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-4,8),ceild(4*t2-Nz-3,16));t3<=min(min(floord(4*Nt+Ny-9,16),floord(2*t1+Ny-3,16)),floord(4*t2+Ny-9,16));t3++) {
for (t4=max(max(ceild(t1-60,64),ceild(4*t2-Nz-115,128)),ceild(16*t3-Ny-115,128));t4<=min(min(min(floord(4*Nt+Nx-9,128),floord(2*t1+Nx-3,128)),floord(4*t2+Nx-9,128)),floord(16*t3+Nx+3,128));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(128*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * NOTE: normalizes *y in place (the classic glibc-manual idiom), so the
 * caller's second operand is clobbered.  Returns 1 when the difference
 * is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from the seconds field when x has fewer microseconds. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec  += borrow;
  }
  /* Carry into the seconds field when the usec gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }
  /* After normalization the tv_usec of the result is non-negative. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec; /* negative iff seconds still smaller */
}
/*
 * Driver for the tiled (PLUTO/CLooG-generated) order-1 3D 7-point
 * variable-coefficient stencil.  Usage: prog Nx Ny Nz Nt.
 * Runs TESTS timed sweeps and reports the best time via PRINT_RESULTS.
 */
int main(int argc, char *argv[])
{
int t, i, j, k, m, test; /* NOTE(review): `t` is never used */
/* NOTE(review): Nx/Ny/Nz/Nt remain uninitialized (undefined behavior
 * below) when fewer than 4/5 arguments are supplied -- the argc checks
 * guard the assignments but not the later uses.  Consider defaults. */
int Nx, Ny, Nz, Nt;
if (argc > 3) {
/* +2 adds one boundary/halo layer on each side of the domain */
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
// A[2][Nz][Ny][Nx]: double-buffered grid (time steps alternate via t%2)
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// coef[7][Nz][Ny][Nx]: one coefficient field per 7-point stencil tap
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
// (the values are informational; the tiling below is already hard-coded)
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
// NOTE(review): loops start at 1, so the index-0 planes of A and coef
// are never initialized even though the stencil reads neighbors at
// offset -1; presumably intentional halo handling -- confirm.
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1; /* NOTE(review): computed but never used */
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* The license text below is an artifact of the source-to-source
 * transformation tool inlining a glibc header comment. */
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Machine-generated time-tiled loop nest (tile sizes 32x32x8x64);
 * t5 is the time step, t6/t7/t8 the z/y/x coordinates shifted by 4*t?.
 * Do not hand-edit: regenerate via the polyhedral tool instead. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(32*t2-Nz-4,8)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(16*t1+Ny+29,8)),floord(32*t2+Ny+28,8)),floord(32*t1-32*t2+Nz+Ny+27,8));t3++) {
for (t4=max(max(max(0,ceild(t1-3,4)),ceild(32*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(16*t1+Nx+29,64)),floord(32*t2+Nx+28,64)),floord(8*t3+Nx+4,64)),floord(32*t1-32*t2+Nz+Nx+27,64));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),8*t3+6),64*t4+62),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(64*t4,t5+1);
ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
// NOTE(review): the top-level pointers A, coef and the tile_size list
// are never freed (only their inner levels are); harmless at exit but
// a leak if this main were converted into a library routine.
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
eigenvalue.h | //! \file eigenvalue.h
//! \brief Data/functions related to k-eigenvalue calculations
#ifndef OPENMC_EIGENVALUE_H
#define OPENMC_EIGENVALUE_H
#include <array>
#include <cstdint> // for int64_t
#include <vector>
#include "xtensor/xtensor.hpp"
#include "openmc/particle.h"
namespace openmc {
//==============================================================================
// Global variables
//==============================================================================
namespace simulation {
extern double keff_generation; //!< Single-generation k on each processor
extern std::array<double, 2> k_sum; //!< Used to reduce sum and sum_sq
extern std::vector<double> entropy; //!< Shannon entropy at each generation
extern xt::xtensor<double, 1> source_frac; //!< Source fraction for UFS
//! Per-thread fission-bank counter with C linkage; presumably the number
//! of sites in this thread's bank -- TODO confirm against definition.
extern "C" int64_t n_bank;
#pragma omp threadprivate(n_bank)
} // namespace simulation
//==============================================================================
// Non-member functions
//==============================================================================
//! Collect/normalize the tracklength keff from each process
extern "C" void calculate_generation_keff();
//! Calculate mean/standard deviation of keff during active generations
//!
//! This function sets the global variables keff and keff_std which represent
//! the mean and standard deviation of the mean of k-effective over active
//! generations. It also broadcasts the value from the master process.
extern "C" void calculate_average_keff();
//! Calculates a minimum variance estimate of k-effective
//!
//! The minimum variance estimate is based on a linear combination of the
//! collision, absorption, and tracklength estimates. The theory behind this can
//! be found in M. Halperin, "Almost linearly-optimum combination of unbiased
//! estimates," J. Am. Stat. Assoc., 56, 36-43 (1961),
//! doi:10.1080/01621459.1961.10482088. The implementation here follows that
//! described in T. Urbatsch et al., "Estimation and interpretation of keff
//! confidence intervals in MCNP," Nucl. Technol., 111, 169-182 (1995).
//!
//! \param[out] k_combined Estimate of k-effective and its standard deviation
//! \return Error status
extern "C" int openmc_get_keff(double* k_combined);
//! Sample/redistribute source sites from accumulated fission sites
extern "C" void synchronize_bank();
//! Calculates the Shannon entropy of the fission source distribution to assess
//! source convergence
extern "C" void shannon_entropy();
//! Determines the source fraction in each UFS mesh cell and reweights the
//! source bank so that the sum of the weights is equal to n_particles. The
//! 'source_frac' variable is used later to bias the production of fission sites
extern "C" void ufs_count_sites();
//! Get UFS weight corresponding to particle's location
extern "C" double ufs_get_weight(const Particle* p);
} // namespace openmc
#endif // OPENMC_EIGENVALUE_H
|
openmp.h | // -*- C++ -*-
#ifndef GRAPHGRIND_BACKEND_OPENMP_H
#define GRAPHGRIND_BACKEND_OPENMP_H
#include "config.h"
#include "graptor/partitioner.h"
#include "graptor/legacy/parallel.h"
#include "graptor/utils.h"
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
#include <sched.h>
#include <errno.h>
#include <cstring>
#include <utility>
#include <algorithm>
#ifdef _OPENMP
#include <omp.h>
#endif
#define parallel_for _Pragma("omp parallel for") for
//! \brief Number of OpenMP threads in the current parallel region.
//!
//! Returns 1 when called from serial code, and degrades to 1 when OpenMP
//! is disabled.  Fix: this header only includes <omp.h> under _OPENMP,
//! but the original body called omp_get_num_threads() unconditionally,
//! which broke builds compiled without OpenMP support.
inline uint32_t graptor_num_threads() {
#ifdef _OPENMP
    return omp_get_num_threads();
#else
    return 1; // no OpenMP runtime: single-threaded execution
#endif
}
#if NUMA
//! \brief Apply \p fn to every partition (NUMA-aware variant).
//!
//! Spawns one spread-bound outer thread per NUMA node, then uses nested
//! parallelism to cover that node's partition range.
template<typename Fn>
void map_partitionL( const partitioner & part, Fn fn ) {
#pragma omp parallel for proc_bind(spread)
    for( unsigned node=0; node < num_numa_node; ++node ) {
	auto first = part.numa_start_of(node);
	auto last = part.numa_end_of(node);
#pragma omp parallel for
	for( auto pid=first; pid < last; ++pid )
	    fn( pid );
    }
}
#else // not NUMA
//! \brief Apply \p fn to every partition (flat, non-NUMA variant).
template<typename Fn>
void map_partitionL( const partitioner & part, Fn fn ) {
    auto npart = part.get_num_partitions();
#pragma omp parallel for
    for( decltype(npart) pid=0; pid < npart; ++pid )
	fn( pid );
}
#endif // NUMA
#if NUMA
//! \brief Apply \p fn to every vertex (NUMA-aware variant).
//!
//! Delegates partition placement to map_partitionL and parallelizes over
//! each partition's vertex range.
template<typename Fn>
void map_vertexL( const partitioner & part, Fn fn ) {
    map_partitionL( part, [&]( typename partitioner::PID pid ) {
	auto vfirst = part.start_of(pid);
	auto vlast = part.end_of(pid);
#pragma omp parallel for
	for( auto v=vfirst; v < vlast; ++v )
	    fn( v );
    } );
}
#else
//! \brief Apply \p fn to every vertex (flat, non-NUMA variant).
template<typename Fn>
void map_vertexL( const partitioner & part, Fn fn ) {
    auto nvert = part.get_num_vertices();
#pragma omp parallel for
    for( typename partitioner::VID v=0; v < nvert; ++v )
	fn( v );
}
#endif
#endif // GRAPHGRIND_BACKEND_OPENMP_H
|
kernels.h | void compute_probs(
const double* __restrict alphas,
const double* __restrict rands,
double* __restrict probs,
int n, int K, int M,
int threads, int blocks)
{
#pragma omp target teams distribute parallel for \
num_teams(blocks) thread_limit(threads)
for (int i = 0; i < n; i++) {
double maxval;
int m, k;
int maxind;
double M_d = (double) M;
double w[21]; // w[K]
for(k = 0; k < K; ++k){ // initialize probs (though already done on CPU)
probs[i*K + k] = 0.0;
}
// core computations
for(m = 0; m < M; ++m){ // loop over Monte Carlo iterations
for(k = 0; k < K; ++k){ // generate W ~ N(alpha, 1)
w[k] = alphas[i*K + k] + rands[m*K + k];
}
// determine which category has max W
maxind = K-1;
maxval = w[K-1];
for(k = 0; k < (K-1); ++k){
if(w[k] > maxval){
maxind = k;
maxval = w[k];
}
}
probs[i*K + maxind] += 1.0;
}
// compute final proportions
for(k = 0; k < K; ++k) {
probs[i*K + k] /= M_d;
}
}
}
/*
 * Same computation as compute_probs, but with transposed layouts so the
 * innermost accesses are unit-stride: alphas and probs are [K][n]
 * (column-major over observations) and rands is [K][M].
 *
 * NOTE: assumes K <= 21 (size of the per-iteration scratch buffer).
 * threads/blocks only shape the OpenMP offload launch.
 */
void compute_probs_unitStrides(
    const double* __restrict alphas,
    const double* __restrict rands,
    double* __restrict probs,
    int n, int K, int M,
    int threads, int blocks)
{
#pragma omp target teams distribute parallel for \
  num_teams(blocks) thread_limit(threads)
  for (int i = 0; i < n; i++) {
    double draw[21];                    /* scratch for one sample, draw[K] */
    const double M_real = (double) M;

    /* reset this observation's outputs (host may have done it too) */
    for (int k = 0; k < K; ++k)
      probs[k*n + i] = 0.0;

    for (int m = 0; m < M; ++m) {
      /* transposed indexing gives unit stride across i in this loop */
      for (int k = 0; k < K; ++k)
        draw[k] = alphas[k*n + i] + rands[k*M + m];

      /* argmax over categories; ties keep index K-1 since only a
         strictly greater value displaces the running max */
      int best = K - 1;
      double best_val = draw[K - 1];
      for (int k = 0; k < K - 1; ++k) {
        if (draw[k] > best_val) {
          best = k;
          best_val = draw[k];
        }
      }
      probs[best*n + i] += 1.0;
    }

    /* counts -> proportions, unit stride across i */
    for (int k = 0; k < K; ++k)
      probs[k*n + i] /= M_real;
  }
}
/*
 * Variant of compute_probs_unitStrides that stages per-thread scratch in
 * a team-local buffer, mimicking CUDA shared memory: the first half of
 * `shared` holds per-thread probs, the second half per-thread draws,
 * both interleaved by thread index for coalesced access.
 *
 * NOTE(review): `shared` is statically sized for K <= 21 and
 * threads <= 96; larger launch parameters overflow it -- confirm the
 * caller enforces these bounds.
 */
void compute_probs_unitStrides_sharedMem(
const double* __restrict alphas,
const double* __restrict rands,
double* __restrict probs,
int n, int K, int M,
int threads, int blocks)
{
#pragma omp target teams num_teams(blocks) thread_limit(threads)
{
/* one instance per team, shared by that team's threads */
double shared[21 * 96 * 2]; // static
#pragma omp parallel
{
int threadIdx_x = omp_get_thread_num();
int threads_per_block = threads;
/* global observation index: team id * block size + lane */
int i = omp_get_team_num() * threads + threadIdx_x;
if (i < n) {
// set up shared memory: half for probs and half for w
double* probs_shared = shared;
// shared mem is one big block, so need to index into latter portion of it to use for w
double* w = &shared[K*threads_per_block];
double maxval;
int m, k;
int maxind;
double M_d = (double) M;
// initialize shared memory probs
for(k = 0; k < K; ++k) {
probs_shared[k*threads_per_block + threadIdx_x] = 0.0;
}
// core computation
for(m = 0; m < M; ++m){ // loop over Monte Carlo iterations
for(k = 0; k < K; ++k){ // generate W ~ N(alpha, 1)
w[k*threads_per_block + threadIdx_x] = alphas[k*n + i] + rands[k*M + m];
}
/* argmax; ties keep index K-1 (only strictly greater wins) */
maxind = K-1;
maxval = w[(K-1)*threads_per_block + threadIdx_x];
for(k = 0; k < (K-1); ++k){
if(w[k*threads_per_block + threadIdx_x] > maxval){
maxind = k;
maxval = w[k*threads_per_block + threadIdx_x];
}
}
probs_shared[maxind*threads_per_block + threadIdx_x] += 1.0;
}
/* counts -> proportions */
for(k = 0; k < K; ++k) {
probs_shared[k*threads_per_block + threadIdx_x] /= M_d;
}
// copy to device memory so can be returned to CPU
for(k = 0; k < K; ++k) {
probs[k*n + i] = probs_shared[k*threads_per_block + threadIdx_x];
}
}
}
}
}
|
comm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Copyright (c) 2015 by Contributors
*/
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication
 */
class Comm {
 public:
  // All subclasses hand out the same CPU-pinned context for staging.
  Comm() {
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param key the identifier key for the stored ndarray
   * \param src the source row_sparse ndarray to broadcast
   * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
   *        where the row_ids are expected to be unique and sorted in row_id.data()
   * \param priority the priority of the operation
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const int priority) = 0;
  /**
   * \brief return a pinned contex
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }
  /**
   * \brief Sets gradient compression parameters to be able to
   * perform reduce with compressed gradients
   */
  void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
    gc_ = gc;
  }
 protected:
  Context pinned_ctx_;                      //!< CPU-pinned context returned by pinned_ctx()
  std::shared_ptr<GradientCompression> gc_; //!< unset until SetGradientCompression is called
};
/**
 * \brief an implementation of Comm that first copies data to CPU memory, and
 * then reduces there
 */
class CommCPU : public Comm {
public:
CommCPU() {
nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
// TODO(junwu) delete the following data member, now for benchmark only
is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
}
virtual ~CommCPU() { }
void Init(int key, const NDArrayStorageType stype, const TShape& shape,
int type = mshadow::kFloat32) override {
// Delayed allocation - the dense merged buffer might not be used at all if push()
// only sees sparse arrays
bool delay_alloc = true;
merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type);
}
const NDArray& Reduce(int key, const std::vector<NDArray>& src,
int priority) override {
auto& buf = merge_buf_[key];
const auto stype = src[0].storage_type();
// avoid extra copy for single device, but it may bring problems for
// abnormal usage of kvstore
if (src.size() == 1) {
if (stype == kDefaultStorage) {
return src[0];
} else {
// With 'local' kvstore, we could store the weight on CPU while compute
// the gradient on GPU when the weight is extremely large.
// To avoiding copying the weight to the same context of the gradient,
// we always copy the gradient to merged buf.
NDArray& merged = buf.merged_buf(stype);
CopyFromTo(src[0], &merged, priority);
return merged;
}
}
NDArray& buf_merged = buf.merged_buf(stype);
// normal dense reduce
if (stype == kDefaultStorage) {
std::vector<Engine::VarHandle> const_vars(src.size() - 1);
std::vector<NDArray> reduce(src.size());
CopyFromTo(src[0], &buf_merged, priority);
reduce[0] = buf_merged;
if (buf.copy_buf.empty()) {
buf.copy_buf.resize(src.size()-1);
for (size_t j = 0; j < src.size() - 1; ++j) {
// allocate copy buffer
buf.copy_buf[j] = NDArray(
src[0].shape(), pinned_ctx_, false, src[0].dtype());
}
}
CHECK(stype == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << stype << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 1; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
reduce[i] = buf.copy_buf[i-1];
const_vars[i-1] = reduce[i].var();
}
Engine::Get()->PushAsync(
[reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
ReduceSumCPU(reduce);
on_complete();
}, Context::CPU(), const_vars, {reduce[0].var()},
FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
} else {
// sparse reduce
std::vector<Engine::VarHandle> const_vars(src.size());
std::vector<NDArray> reduce(src.size());
if (buf.copy_buf.empty()) {
buf.copy_buf.resize(src.size());
for (size_t j = 0; j < src.size(); ++j) {
buf.copy_buf[j] = NDArray(
src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype());
}
}
CHECK(stype == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << stype << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 0; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
reduce[i] = buf.copy_buf[i];
const_vars[i] = reduce[i].var();
}
Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(),
ResourceRequest(ResourceRequest::kTempSpace));
Engine::Get()->PushAsync(
[reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
NDArray out = buf_merged;
is_serial_push_?
ReduceSumCPUExSerial(reduce, &out)
: mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
on_complete();
}, Context::CPU(), const_vars, {buf_merged.var(), rsc.var},
FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
}
return buf_merged;
}
void Broadcast(int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) override {
int mask = src.ctx().dev_mask();
if (mask == Context::kCPU) {
for (auto d : dst) CopyFromTo(src, d, priority);
} else {
// First copy data to pinned_ctx, then broadcast.
// Note that kv.init initializes the data on pinned_ctx.
// This branch indicates push() with ndarrays on gpus were called,
// and the source is copied to gpu ctx.
// Also indicates that buffers are already initialized during push().
auto& buf = merge_buf_[key].merged_buf(src.storage_type());
CopyFromTo(src, &buf, priority);
for (auto d : dst) CopyFromTo(buf, d, priority);
}
}
void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) override {
using namespace mshadow;
CHECK_EQ(src.storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row-sparse src NDArray";
CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
<< "BroadcastRowSparse with src on gpu context not supported";
for (size_t i = 0; i < dst.size(); ++i) {
NDArray* out = dst[i].first;
NDArray row_id = dst[i].second;
CHECK_EQ(out->storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row_sparse dst NDArray";
CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
<< "BroadcastRowSparse with row_indices on gpu context not supported";
// retain according to unique indices
const bool is_same_ctx = out->ctx() == src.ctx();
const bool is_diff_var = out->var() != src.var();
NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out :
NDArray(kRowSparseStorage, src.shape(), src.ctx(), true,
src.dtype(), src.aux_types());
if (!is_diff_var) {
common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
"refers to the same NDArray as the one stored in KVStore."
"Performing row_sparse_pull() with such output is going to change the "
"data stored in KVStore. Incorrect result may be generated "
"next time row_sparse_pull() is called. To avoid such an issue,"
"consider create a new NDArray buffer to store the output.");
}
Engine::Get()->PushAsync(
[=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
const TBlob& indices = row_id.data();
NDArray temp = retained_cpu; // get rid the of const qualifier
op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
src, indices, kWriteTo,
&temp);
on_complete();
}, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()},
FnProperty::kNormal, priority, "KVStoreSparseRetain");
// if retained_cpu == out, CopyFromTo will ignore the copy operation
CopyFromTo(retained_cpu, out, priority);
}
}
private:
// reduce sum into val[0]
inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
std::vector<DType*> dptr(in_data.size());
for (size_t i = 0; i < in_data.size(); ++i) {
TBlob data = in_data[i].data();
CHECK(data.CheckContiguous());
dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
}
size_t total = in_data[0].shape().Size();
ReduceSumCPUImpl(dptr, total);
});
}
// serial implementation of reduce sum for row sparse NDArray.
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
using namespace rowsparse;
using namespace mshadow;
auto stype = out->storage_type();
CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
size_t total_num_rows = 0;
size_t num_in = in.size();
// skip the ones with empty indices and values
std::vector<bool> skip(num_in, false);
// the values tensor of the inputs
MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
// offset to the values tensor of all inputs
std::vector<size_t> offsets(num_in, 0);
std::vector<size_t> num_rows(num_in, 0);
for (size_t i = 0; i < num_in; i++) {
if (!in[i].storage_initialized()) {
skip[i] = true;
continue;
}
auto size = in[i].aux_shape(kIdx).Size();
num_rows[i] = size;
total_num_rows += size;
in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
}
std::vector<IType> indices;
indices.reserve(total_num_rows);
// gather indices from all inputs
for (size_t i = 0; i < num_in; i++) {
for (size_t j = 0; j < num_rows[i]; j++) {
indices.emplace_back(in_indices[i][j]);
}
}
CHECK_EQ(indices.size(), total_num_rows);
// dedup indices
std::sort(indices.begin(), indices.end());
indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
// the one left are unique non-zero rows
size_t nnr = indices.size();
// allocate memory for output
out->CheckAndAlloc({Shape1(nnr)});
auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
auto val_data = out->data().FlatTo2D<cpu, DType>();
for (size_t i = 0; i < nnr; i++) {
// copy indices back
idx_data[i] = indices[i];
bool zeros = true;
for (size_t j = 0; j < num_in; j++) {
if (skip[j]) continue;
size_t offset = offsets[j];
if (offset < num_rows[j]) {
if (indices[i] == in_indices[j][offset]) {
if (zeros) {
Copy(val_data[i], in_vals[j][offset], nullptr);
zeros = false;
} else {
val_data[i] += in_vals[j][offset];
}
offsets[j] += 1;
}
}
}
}
});
});
}
template<typename DType>
inline static void ReduceSumCPU(
const std::vector<DType*> &dptr, size_t offset, index_t size) {
using namespace mshadow; // NOLINT(*)
Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
for (size_t i = 1; i < dptr.size(); i+=4) {
switch (dptr.size() - i) {
case 1: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
in_0 += in_1;
break;
}
case 2: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
in_0 += in_1 + in_2;
break;
}
case 3: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
in_0 += in_1 + in_2 + in_3;
break;
}
default: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
in_0 += in_1 + in_2 + in_3 + in_4;
break;
}
}
}
}
template<typename DType>
inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
long ntask = (total + step - 1) / step; // NOLINT(*)
if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
ReduceSumCPU(dptr, 0, total);
} else {
#pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
for (long j = 0; j < ntask; ++j) { // NOLINT(*)
size_t k = static_cast<size_t>(j);
size_t begin = std::min(k * step, total);
size_t end = std::min((k + 1) * step, total);
if (j == ntask - 1) CHECK_EQ(end, total);
ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
}
}
}
/// \brief temporal space for pushing and pulling
struct BufferEntry {
  /// \brief the merged value
  NDArray merged;
  /// \brief the cpu buffer for gpu data
  std::vector<NDArray> copy_buf;

  /// \brief the merged buffer for the given storage type
  /// Dense requests return `merged` directly; row-sparse requests return a
  /// lazily created row-sparse buffer with the same shape/context/dtype as
  /// `merged`.  Any other storage type is a fatal error.
  inline NDArray& merged_buf(NDArrayStorageType stype) {
    if (stype == kDefaultStorage) {
      return merged;
    }
    CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
    // check if sparse_merged is initialized
    if (sparse_merged.is_none()) {
      // merged must already exist: it supplies shape, context, and dtype
      CHECK(!merged.is_none());
      sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
                              true, merged.dtype());
    }
    return sparse_merged;
  }

 private:
  /// \brief the sparse merged value
  NDArray sparse_merged;
};
std::unordered_map<int, BufferEntry> merge_buf_;
size_t bigarray_bound_;
int nthread_reduction_;
bool is_serial_push_;
};
/**
* \brief an implementation of Comm that performs reduction on device
* directly.
*
* It is faster if the total device-to-device bandwidths is larger than
* device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
* memory.
*/
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  // Record the attributes of a key.  Buffer allocation is deferred to
  // InitMergeBuffer() so that buffers can be spread evenly across devices.
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int dtype = mshadow::kFloat32) override {
    sorted_key_attrs_.emplace_back(key, shape, dtype);
    inited_ = false;
  }

  // Lazily set up the merge buffers from the contexts of the first batch of
  // source arrays, and enable GPU peer-to-peer access unless disabled via
  // the MXNET_ENABLE_GPU_P2P environment variable.
  void InitBuffersAndComm(const std::vector<NDArray>& src) {
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }
  }

  // Reduce row-sparse gradients: copy every source into a per-source buffer
  // on the merged buffer's device, then sum the copies into the merged
  // buffer.  Returns a reference to the merged (row-sparse) buffer.
  const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src,
                                 int priority) {
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());

    const NDArrayStorageType stype = src[0].storage_type();
    NDArray& buf_merged = buf.merged_buf(stype);
    if (buf.copy_buf.empty()) {
      // initialize buffer for copying during reduce
      buf.copy_buf.resize(src.size());
      for (size_t j = 0; j < src.size(); ++j) {
        buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype());
      }
    }
    CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type())
        << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. "
        << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
    for (size_t i = 0; i < src.size(); ++i) {
      CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
      reduce[i] = buf.copy_buf[i];
    }
    ElementwiseSum(reduce, &buf_merged, priority);
    return buf_merged;
  }

  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // when this reduce is called from kvstore_dist, gc is not set
    // we don't do compression twice in dist_sync_device
    if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) {
      return ReduceCompressed(key, src, priority);
    }

    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }

    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    const NDArrayStorageType stype = src[0].storage_type();
    NDArray& buf_merged = buf.merged_buf(stype);
    // normal dense reduce
    if (stype == kDefaultStorage) {
      // src[0] seeds the merged buffer; the rest are staged via copy_buf
      CopyFromTo(src[0], &buf_merged, priority);

      std::vector<NDArray> reduce(src.size());
      reduce[0] = buf_merged;

      if (buf.copy_buf.empty()) {
        // TODO(mli) this results in large device memory usage for huge ndarray,
        // such as the largest fullc in VGG. consider to do segment reduce with
        // NDArray.Slice or gpu direct memory access. for the latter, we need to
        // remove some ctx check, and also it reduces 20% perf
        buf.copy_buf.resize(src.size()-1);
        for (size_t i = 0; i < src.size()-1; ++i) {
          buf.copy_buf[i] = NDArray(
            buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype());
        }
      }
      for (size_t i = 0; i < src.size()-1; ++i) {
        CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
        reduce[i+1] = buf.copy_buf[i];
      }
      ElementwiseSum(reduce, &buf_merged, priority);
    } else {
      // sparse reduce
      buf_merged = ReduceRowSparse(key, src, priority);
    }
    return buf_merged;
  }

  // Reduce with gradient compression: quantize each source on its own
  // device (keeping the quantization error in a per-source residual), move
  // the small compressed tensor to the merge device, dequantize, then sum.
  const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src,
                                  int priority) {
    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    if (buf.copy_buf.empty()) {
      // one buf for each context
      buf.copy_buf.resize(src.size());
      buf.compressed_recv_buf.resize(src.size());
      buf.compressed_send_buf.resize(src.size());
      buf.residual.resize(src.size());

      for (size_t i = 0; i < src.size(); ++i) {
        buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = 0;
        int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size());
        buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(),
                                             false, buf.merged.dtype());
        buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(),
                                             false, buf.merged.dtype());
      }
    }

    for (size_t i = 0; i < src.size(); ++i) {
      // compress before copy
      // this is done even if the data is on same context as copy_buf because
      // we don't want the training to be biased towards data on this GPU
      gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority);
      if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) {
        CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority);
      } else {
        // avoid memory copy when they are on same context
        buf.compressed_recv_buf[i] = buf.compressed_send_buf[i];
      }
      gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority);
      reduce[i] = buf.copy_buf[i];
    }
    // FIX: propagate the scheduling priority, consistent with the other
    // reduce paths (previously dropped here).
    ElementwiseSum(reduce, &buf.merged, priority);
    return buf.merged;
  }

  // Broadcast src to every dst array.  Before buffers are initialized we
  // fan out through one of the destinations; afterwards we stage through
  // the merge buffer on its device.
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type());
      CopyFromTo(src, &buf_merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf_merged, d, priority);
      }
    }
  }

  // For each (out, row_id) pair, retain from src only the rows listed in
  // row_id (sparse retain, scheduled on src's device) and copy the result
  // into out.
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";

    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      CHECK_EQ(out->storage_type(), kRowSparseStorage)
        << "BroadcastRowSparse expects row_sparse dst NDArray";
      CHECK_EQ(row_id.ctx(), src.ctx())
        << "row_id and src are expected to be on the same context";

      // retain according to indices: write straight into out only when it
      // lives on src's device and is a different variable; otherwise use a
      // temporary on src's device.
      const bool is_same_ctx = out->ctx() == src.ctx();
      const bool is_diff_var = out->var() != src.var();
      NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out :
          NDArray(kRowSparseStorage, out->shape(), src.ctx(), true,
                  out->dtype(), out->aux_types());
      if (!is_diff_var) {
        // FIX: inter-literal spaces were missing, producing e.g. "key 3refers"
        common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
                        " refers to the same NDArray as the one stored in KVStore. "
                        "Performing row_sparse_pull() with such output is going to change the "
                        "data stored in KVStore. Incorrect result may be generated "
                        "next time row_sparse_pull() is called. To avoid such an issue, "
                        "consider create a new NDArray buffer to store the output.");
      }

      bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask;
      Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          const TBlob& indices = row_id.data();
          using namespace mxnet::common;
          NDArray temp = retained_gpu;
          switch (temp.ctx().dev_mask()) {
            case cpu::kDevMask: {
              SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(),
                  src, indices, kWriteTo, &temp);
              break;
            }
#if MXNET_USE_CUDA
            case gpu::kDevMask: {
              SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(),
                  src, indices, kWriteTo, &temp);
              // wait for GPU operations to complete
              rctx.get_stream<gpu>()->Wait();
              break;
            }
#endif
            default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
          }
          on_complete();
        }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()},
        is_gpu ? FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized,
        priority, "KVStoreSparseRetain");
      CopyFromTo(retained_gpu, out, priority);
    }
  }

  using KeyAttrs = std::tuple<int, TShape, int>;
  // try to allocate buff on device evenly
  void InitMergeBuffer(const std::vector<Context>& devs) {
    // Largest arrays first, so the greedy assignment below balances bytes.
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
              const KeyAttrs& a, const KeyAttrs& b) {
      return std::get<1>(a).Size() > std::get<1>(b).Size();
    });

    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }

    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
      const int key = std::get<0>(sorted_key_attrs_[i]);
      const TShape& shape = std::get<1>(sorted_key_attrs_[i]);
      const int type = std::get<2>(sorted_key_attrs_[i]);
      auto& buf = merge_buf_[key];
      // choose the device currently assigned the fewest elements
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      // Delayed allocation - as the dense merged buffer might not be used at all if push()
      // only sees sparse arrays
      if (buf.merged.is_none()) {
        bool delay_alloc = true;
        buf.merged = NDArray(shape, ctx, delay_alloc, type);
      }
      ctx_info[ctx.dev_id].second += shape.Size();
    }
    inited_ = true;
  }

 private:
  // Enable CUDA peer-to-peer access between every pair of visible GPUs;
  // logs an access matrix ('v' = enabled) when some pairs cannot be enabled.
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the dense merged value for reduce and broadcast operations
    NDArray merged;
    /// \brief the gpu buffer for copy during reduce operation
    std::vector<NDArray> copy_buf;
    /// \brief the residual buffer for gradient compression
    std::vector<NDArray> residual;
    /// \brief the small buffer for compressed data in sender
    std::vector<NDArray> compressed_send_buf;
    /// \brief the small buffer for compressed data in receiver
    std::vector<NDArray> compressed_recv_buf;

    /// \brief the merged buffer for the given storage type (could be either dense or row_sparse)
    inline NDArray& merged_buf(NDArrayStorageType stype) {
      if (stype == kDefaultStorage) {
        // FIX: spelling of the error message ("unintialized")
        CHECK(!merged.is_none()) << "uninitialized merge buffer detected";
        return merged;
      }
      CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
      // check if sparse_merged is initialized
      if (sparse_merged.is_none()) {
        CHECK(!merged.is_none());
        sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
                                true, merged.dtype());
      }
      return sparse_merged;
    }

   private:
    /// \brief the sparse merged value for reduce and rowsparse broadcast operations
    NDArray sparse_merged;
  };
  std::unordered_map<int, BufferEntry> merge_buf_;

 public:
  bool inited_;
  std::vector<KeyAttrs> sorted_key_attrs_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_COMM_H_
|
GB_binop__isge_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__isge_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__isge_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int16)
// A*D function (colscale): GB (_AxD__isge_int16)
// D*A function (rowscale): GB (_DxB__isge_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int16)
// C=scalar+B GB (_bind1st__isge_int16)
// C=scalar+B' GB (_bind1st_tran__isge_int16)
// C=A+scalar GB (_bind2nd__isge_int16)
// C=A'+scalar GB (_bind2nd_tran__isge_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_INT16 || GxB_NO_ISGE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.  The numeric
// work is done by the included template, specialized via the GB_* macros
// defined above in this (auto-generated) file.
GrB_Info GB (_Cdense_ewise3_noaccum__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B is traversed using the
// precomputed ek_slice task partition.
GrB_Info GB (_Cdense_accumB__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar, passed as an untyped pointer.
GrB_Info GB (_Cdense_accumb__isge_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    { 
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached (the inner block returns); kept from the code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, scaling each column of A by the corresponding diagonal entry of D.
GrB_Info GB (_AxD__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as A; only its values are written here
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, scaling each row of B by the corresponding diagonal entry of D.
GrB_Info GB (_DxB__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as B; only its values are written here
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, over the union of the patterns of A and
// B, using the precomputed task list for parallelism.
GrB_Info GB (_AaddB__isge_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, over the intersection of the patterns
// of A and B (general case).
GrB_Info GB (_AemultB_01__isge_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_FLIPPED selects fmult(y,x) vs fmult(x,y) inside the template; for this
// operator GB_BINOP_FLIP is 0, so only the unflipped branch is compiled.
GrB_Info GB (_AemultB_02__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B when M is sparse/hyper and both A and B are
// bitmap/full; iteration is driven by the pattern of M.
GrB_Info GB (_AemultB_03__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__isge_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry present in B, with the scalar x
// bound to the first operand.  Bb is B's bitmap (or NULL if all present).
GrB_Info GB (_bind1st__isge_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int16_t x = (*((int16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        Cx [p] = (x >= Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry present in A, with the scalar y
// bound to the second operand.  Ab is A's bitmap (or NULL if all present).
GrB_Info GB (_bind2nd__isge_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arguments
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        Cx [p] = (Ax [p] >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x >= aij) via GB_CAST_OP,
// defined just above this function.
GrB_Info GB (_bind1st_tran__isge_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply z = (aij >= y) via GB_CAST_OP,
// defined just above this function.
GrB_Info GB (_bind2nd_tran__isge_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__log2_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log2_fc64_fc64)
// op(A') function: GB (_unop_tran__log2_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_clog2 (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog2 (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_clog2 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_clog2 (Ax [p]) for each entry of A; the bitmap branch skips
// entries not present in A.
GrB_Info GB (_unop_apply__log2_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    { 
        // A is not bitmap: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            Cx [p] = GB_clog2 (Ax [p]) ;
        }
    }
    else
    { 
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        { 
            if (!Ab [p]) continue ;
            Cx [p] = GB_clog2 (Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = log2 (A'): transpose, typecast, and apply the unary operator; the
// work is done by the included template, specialized by the GB_* macros.
GrB_Info GB (_unop_tran__log2_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
pi2_worksharing.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
void Usage(char *prog_name);
/*
* worksharing
*/
/*
 * Estimate pi with the Leibniz series 4 * sum((-1)^i / (2i+1)) using an
 * OpenMP worksharing loop.  Takes one argument: n, the number of terms.
 */
int main(int argc, char *argv[])
{
    long long n, i;
    /* FIX: factor must be initialized -- it is printed before the parallel
     * loop, and reading an uninitialized automatic variable is undefined
     * behavior. */
    double factor = 0.0;
    double sum = 0.0;

    if (argc != 2)
        Usage(argv[0]);
    n = strtoll(argv[1], NULL, 10);
    if (n < 1)
        Usage(argv[0]);

    printf("Before for loop, factor = %f.\n", factor);
    /* sum is reduced across threads; lastprivate(factor) copies out the
     * value from the sequentially last iteration (i == n-1). */
#pragma omp parallel for reduction(+:sum) lastprivate(factor)
    for (i = 0; i < n; i++)
    {
        factor = (i % 2 == 0) ? 1.0 : -1.0;
        sum += factor / (2 * i + 1);
    }
    printf("After for loop, factor = %f.\n", factor);

    sum = 4.0 * sum;
    printf("With n = %lld terms\n", n);
    printf("   Our estimate of pi = %.14f\n", sum);
    printf("   Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
    return 0;
}
/*
 * Print a usage message to stderr and terminate.
 *
 * FIX: the message previously advertised a <thread_count> argument that
 * main() never accepts (it requires argc == 2, i.e. only <n>; the thread
 * count comes from OMP_NUM_THREADS).  Also exit with a nonzero status so
 * callers can detect that the invocation failed.
 */
void Usage(char *prog_name)
{
    fprintf(stderr, "usage: %s <n>\n", prog_name);
    fprintf(stderr, "   n is the number of terms and should be >= 1\n");
    exit(1);
}
|
rt.c | /*******************************************************************************
* RT-Flux-PIHM is a finite volume based, reactive transport module that operates
* on top of the hydrological land surface processes described by Flux-PIHM.
* RT-Flux-PIHM tracks the transportation and reaction in a given watershed. It
* uses operator splitting technique to couple transport and reaction.
*****************************************************************************/
#include "pihm.h"
/* Begin global variable definition (MACRO) */
#define UNIT_C 1440
#define ZERO 1E-20
#define LINE_WIDTH 512
#define WORDS_LINE 40
#define WORD_WIDTH 80
#define INFTYSMALL 1E-6
#define MIN(a,b) (((a)<(b))? (a):(b))
#define MAX(a,b) (((a)>(b))? (a):(b))
/* Correct the vertical fluxes (recharge and infiltration) of every element
 * so transport sees fluxes consistent with the storage change computed by
 * the hydrologic model over this step.
 *
 * UNIT_C = 1440 suggests stepsize is in minutes and fluxes in per-day
 * units -- TODO confirm against the flux definitions.  Sign convention for
 * CD->Flux: in negative, out positive. */
void Monitor(realtype stepsize, const pihm_struct pihm, Chem_Data CD)
{
    int i;
    double unit_c = stepsize / UNIT_C;

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (i = 0; i < nelem; i++)
    {
        double resflux = 0.0;
        double sumflux1, sumflux2;
        int j;

        /*
         * Correct recharge in the saturated zone
         */
        /* Net lateral groundwater exchange over the step (volume units). */
        for (j = 0; j < NUM_EDGE; j++)
        {
            resflux -= CD->Flux[RT_LAT_GW(i, j)].flux * unit_c;
        }

        /* Groundwater storage change over the step (volume units). */
        sumflux1 =
            (CD->Vcele[RT_GW(i)].height_t - CD->Vcele[RT_GW(i)].height_o) *
            pihm->elem[i].topo.area * CD->Vcele[RT_GW(i)].porosity;
        sumflux2 = sumflux1 - resflux;
        /* Flux: in negative, out positive */
        CD->Flux[RT_RECHG_GW(i)].flux = -sumflux2 * UNIT_C / stepsize;
        /* Recharge leaving the unsaturated zone mirrors what enters GW. */
        CD->Flux[RT_RECHG_UNSAT(i)].flux = -CD->Flux[RT_RECHG_GW(i)].flux;

        /*
         * Correct infiltration in the unsaturated zone
         */
        sumflux1 =
            (CD->Vcele[RT_UNSAT(i)].height_t - CD->Vcele[RT_UNSAT(i)].height_o) *
            pihm->elem[i].topo.area * CD->Vcele[RT_UNSAT(i)].porosity;
        sumflux2 = sumflux1 + CD->Flux[RT_RECHG_UNSAT(i)].flux * unit_c;
        CD->Flux[RT_INFIL(i)].flux = -sumflux2 * UNIT_C / stepsize;

        /* Input of rain water chemistry can not be negative, i.e., infil.flux
         * should be negative */
        CD->Flux[RT_INFIL(i)].flux = MIN(CD->Flux[RT_INFIL(i)].flux, 0.0);

        /* In addition, the soil evaporation leaves chemicals inside */
        CD->Flux[RT_INFIL(i)].flux -=
            fabs(pihm->elem[i].wf.edir_unsat + pihm->elem[i].wf.edir_gw) *
            86400 * pihm->elem[i].topo.area;
    }
}
/* Locate the element upstream of the up -> lo flow direction.  Returns the
 * 1-based index of the containing element, or 0 if none is found. */
int upstream(elem_struct up, elem_struct lo, const pihm_struct pihm)
{
    /* Locate the upstream grid of up -> lo flow */
    /* Require verification */
    /* only determines points in triangular elements */
    double x_, y_;
    int i;

    /* Probe point (x_, y_): lo's centroid mirrored across up's centroid,
     * i.e. one step further along the lo -> up direction. */
    x_ = 2 * up.topo.x - lo.topo.x;
    y_ = 2 * up.topo.y - lo.topo.y;

    for (i = 0; i < nelem; i++)
    {
        double x_a, x_b, x_c;
        double y_a, y_b, y_c;
        double dot00, dot01, dot02, dot11, dot12, u, v, invDenom;

        /* Find point lies in which triangular element, a very interesting
         * method */
        /* Barycentric point-in-triangle test: the probe lies strictly inside
         * triangle (a,b,c) iff u > 0, v > 0, and u + v < 1.  up and lo
         * themselves are excluded from the search. */
        if ((i != (up.ind - 1)) && (i != (lo.ind - 1)))
        {
            /* Triangle vertices (node indices are 1-based). */
            x_a = pihm->meshtbl.x[pihm->elem[i].node[0] - 1];
            x_b = pihm->meshtbl.x[pihm->elem[i].node[1] - 1];
            x_c = pihm->meshtbl.x[pihm->elem[i].node[2] - 1];
            y_a = pihm->meshtbl.y[pihm->elem[i].node[0] - 1];
            y_b = pihm->meshtbl.y[pihm->elem[i].node[1] - 1];
            y_c = pihm->meshtbl.y[pihm->elem[i].node[2] - 1];

            dot00 = (x_c - x_a) * (x_c - x_a) + (y_c - y_a) * (y_c - y_a);
            dot01 = (x_c - x_a) * (x_b - x_a) + (y_c - y_a) * (y_b - y_a);
            dot02 = (x_c - x_a) * (x_ - x_a) + (y_c - y_a) * (y_ - y_a);
            dot11 = (x_b - x_a) * (x_b - x_a) + (y_b - y_a) * (y_b - y_a);
            dot12 = (x_b - x_a) * (x_ - x_a) + (y_b - y_a) * (y_ - y_a);

            invDenom = 1.0 / (dot00 * dot11 - dot01 * dot01);
            u = (dot11 * dot02 - dot01 * dot12) * invDenom;
            v = (dot00 * dot12 - dot01 * dot02) * invDenom;

            if ((u > 0.0) && (v > 0.0) && (u + v < 1.0))
            {
                return pihm->elem[i].ind;
            }
        }
    }

    /* No containing element found. */
    return 0;
}
int realcheck(const char *words)
{
    /* Heuristic test of whether a word looks like a real number.
     *
     * Returns 1 when the word starts with a digit, '.', '-' or '+' and every
     * character is in the ASCII range '+'..'9' (digits plus '+', ',', '-',
     * '.', '/'), an exponent marker ('E'/'e'), or a trailing newline (10) /
     * carriage return (13); returns 0 otherwise.
     *
     * NOTE(review): the range check deliberately mirrors the original crude
     * filter, so ',' and '/' are also accepted — confirm against the input
     * file format before tightening.
     */
    size_t i;
    size_t len;

    /* Reject anything that cannot start a number (also rejects "") */
    if (!(((words[0] >= '0') && (words[0] <= '9')) ||
        (words[0] == '.') || (words[0] == '-') || (words[0] == '+')))
    {
        return (0);
    }

    /* Hoist strlen out of the loop condition: the original re-scanned the
     * string every iteration (accidental O(n^2)). */
    len = strlen(words);
    for (i = 0; i < len; i++)
    {
        /* Ascii 10 is new line and 13 is carriage return */
        if ((words[i] > '9' || words[i] < '+') && (words[i] != 'E')
            && (words[i] != 'e') && (words[i] != 10) && (words[i] != 13))
        {
            /* First offending character decides; no need to keep scanning */
            return (0);
        }
    }
    return (1);
}
int keymatch(const char *line, const char *keyword, double *value, char **strval)
{
    /* General-purpose line parser for the data and input files.
     * Splits `line` into words, copies every word into `strval`, converts
     * each numeric-looking word (per realcheck()) into `value`, and reports
     * whether `keyword` appears among the words.
     *
     * Returns: 2 if the line is a comment ('!' or '#' in column one),
     *          1 if `keyword` was found, 0 otherwise.
     *
     * NOTE(review): caller must supply `value` with at least WORDS_LINE
     * doubles and `strval` with at least WORDS_LINE pre-allocated strings
     * of WORD_WIDTH bytes; nothing here bounds-checks k against WORDS_LINE
     * or j against WORD_WIDTH, so an over-long line would overflow —
     * inputs are assumed to respect LINE_WIDTH/WORDS_LINE/WORD_WIDTH. */
    int i;
    /* Clear the numeric output array before parsing */
    for (i = 0; i < WORDS_LINE; i++)
        value[i] = 0.0;
    if ((line[0] == '!') || (line[0] == '#'))
    {
        /* assign a special flag for comments */
        return (2);
    }
    int j, k;
    int words_line = WORDS_LINE;
    int keyfoundflag = 0;
    char **words;
    /* Scratch buffer: WORDS_LINE zero-filled words of WORD_WIDTH bytes */
    words = (char **)malloc(WORDS_LINE * sizeof(char *));
    for (i = 0; i < WORDS_LINE; i++)
    {
        words[i] = (char *)malloc(WORD_WIDTH * sizeof(char));
        memset(words[i], 0, WORD_WIDTH);
    }
    /* i = position in line, k = current word, j = position within word */
    i = j = k = 0;
    /* Partition the line into words */
    while (i < (int)strlen(line))
    {
        if (line[i] != 39)          /* 39 = single quote (') */
        {
            /* Copy characters until tab (9), NUL (0), LF (10), space (32)
             * or CR (13); advance to the next word slot when the character
             * after the one just copied is a separator. */
            while (line[i] != 9 && line[i] != 0 && line[i] != 10
                && line[i] != 32 && line[i] != 13)
            {
                words[k][j++] = line[i++];
                if (line[i] == 9 || line[i] == 32 || line[i] == 13)
                {
                    k++;
                    j = 0;
                }
            }
        }
        else
        {
            /* Quoted word: copy everything through the closing quote as a
             * single word, quotes included.
             * NOTE(review): a missing closing quote would run past the end
             * of line — assumed well-formed input. */
            words[k][j++] = line[i++];
            while (line[i] != 39)
            {
                words[k][j++] = line[i++];
            }
            words[k++][j] = line[i++];
            j = 0;
        }
        i++;    /* skip the separator that terminated the word */
    }
    words_line = k + 1;     /* number of words actually parsed */
    /* Scan for an exact match with the requested keyword */
    for (i = 0; i < words_line; i++)
        if (strcmp(words[i], keyword) == 0)
            keyfoundflag = 1;
    /* Export all words as strings, and numeric-looking ones as doubles;
     * note value[] is packed (j) independently of strval[] (k). */
    j = k = 0;
    for (i = 0; i < words_line; i++)
    {
        strcpy(strval[k++], words[i]);
        if (realcheck(words[i]) == 1)
            value[j++] = atof(words[i]);
    }
    /* Release the scratch buffer */
    for (i = 0; i < WORDS_LINE; i++)
        free(words[i]);
    free(words);
    return (keyfoundflag);
}
void chem_alloc(char *filename, const pihm_struct pihm, Chem_Data CD)
{
int i, j, k;
int num_species, num_mineral, num_ads, num_cex, num_other,
num_conditions = 0;
int line_width = LINE_WIDTH, words_line =
WORDS_LINE, word_width = WORD_WIDTH;
int Global_diff = 0, Global_disp = 0;
int speciation_flg = 0, specflg;
double total_area = 0.0, tmpval[WORDS_LINE];
char cmdstr[MAXSTRING];
int lno = 0;
int PRCP_VOL;
int VIRTUAL_VOL;
assert(pihm != NULL);
char line[256];
char **tmpstr = (char **)malloc(WORDS_LINE * sizeof(char *));
for (i = 0; i < words_line; i++)
tmpstr[i] = (char *)malloc(WORD_WIDTH * sizeof(char));
char *chemfn =
(char *)malloc((strlen(filename) * 2 + 100) * sizeof(char));
sprintf(chemfn, "input/%s/%s.chem", filename, filename);
FILE *chemfile = fopen(chemfn, "r");
char *datafn =
(char *)malloc((strlen(filename) * 2 + 100) * sizeof(char));
sprintf(datafn, "input/%s/%s.cdbs", filename, filename);
FILE *database = fopen(datafn, "r");
char *forcfn =
(char *)malloc((strlen(filename) * 2 + 100) * sizeof(char));
sprintf(forcfn, "input/%s/%s.prep", filename, filename);
FILE *prepconc = fopen(forcfn, "r");
char *maxwaterfn =
(char *)malloc((strlen(filename) * 2 + 100) * sizeof(char));
sprintf(maxwaterfn, "input/%s/%s.maxwater", filename, filename);
FILE *maxwater = fopen(maxwaterfn, "r");
free(maxwaterfn);
if (chemfile == NULL)
{
fprintf(stderr, "\n Fatal Error: %s.chem does not exist! \n",
filename);
exit(1);
}
if (database == NULL)
{
fprintf(stderr, "\n Fatal Error: %s.cdbs does not exist! \n",
filename);
exit(1);
}
if (prepconc == NULL)
{
fprintf(stderr, "\n Fatal Error: %s.prep does not exist! \n",
filename);
exit(1);
}
if (maxwater == NULL)
{
fprintf(stderr, "\n Fatal Error: %s.maxwater does not exist! \n",
filename);
exit(1);
}
/*
* Begin updating variables
*/
#if defined(_FBR_)
CD->NumVol = 4 * nelem + nriver + 2;
#else
CD->NumVol = 2 * nelem + nriver + 2;
#endif
CD->NumOsv = CD->NumVol - 2;
CD->NumEle = nelem;
CD->NumRiv = nriver;
PRCP_VOL = CD->NumVol - 1;
VIRTUAL_VOL = CD->NumVol;
/* Default control variable if not found in input file */
CD->StartTime = pihm->ctrl.starttime / 60;
CD->TVDFlg = 1;
CD->OutItv = 1;
CD->Cementation = 1.0;
CD->ACTmod = 0;
CD->DHEdel = 0;
CD->TEMcpl = 0;
CD->EffAds = 0;
CD->RelMin = 0;
CD->AvgScl = 1;
CD->CptFlg = 1;
CD->TimRiv = 1.0;
CD->React_delay = 10;
CD->Condensation = 1.0;
CD->NumBTC = 0;
CD->NumPUMP = 0;
CD->SUFEFF = 1;
CD->CnntVelo = 0.01;
CD->TimLst = 0.0;
/* Reading "*.chem" */
/* RUNTIME block */
fprintf(stderr, "\n Reading '%s.chem' RUNTIME: \n", filename);
rewind(chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "RUNTIME", tmpval, tmpstr) != 1)
fgets(line, line_width, chemfile);
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
fgets(line, line_width, chemfile);
if (keymatch(line, "tvd", tmpval, tmpstr) == 1)
{
if (strcmp(tmpstr[1], "false") == 0)
CD->TVDFlg = 0;
if (strcmp(tmpstr[1], "true") == 0)
CD->TVDFlg = 1;
if (strcmp(tmpstr[1], "false") && strcmp(tmpstr[1], "true"))
fprintf(stderr, " TVD FLAG INPUT ERROR! \n");
fprintf(stderr, " Total variation diminishing set to %d %s. \n",
CD->TVDFlg, tmpstr[1]);
}
if (keymatch(line, "output", tmpval, tmpstr) == 1)
{
CD->OutItv = (int)tmpval[0];
fprintf(stderr, " Output interval set to %d hours. \n",
CD->OutItv);
}
if (keymatch(line, "activity", tmpval, tmpstr) == 1)
{
CD->ACTmod = (int)tmpval[0];
fprintf(stderr, " Activity correction is set to %d. \n",
CD->ACTmod);
/* 0 for unity activity coefficient and 1 for DH equation update */
}
if (keymatch(line, "act_coe_delay", tmpval, tmpstr) == 1)
{
CD->DHEdel = (int)tmpval[0];
fprintf(stderr,
" Activity coefficient update delay is set to %d. \n",
CD->DHEdel);
/* 0 for delay and 1 for no delay (solving together) */
}
if (keymatch(line, "thermo", tmpval, tmpstr) == 1)
{
CD->TEMcpl = (int)tmpval[0];
fprintf(stderr, " Coupling of thermo modelling is set to %d. \n",
CD->DHEdel);
/* 0 for delay and 1 for no delay (solving together) */
}
if (keymatch(line, "relmin", tmpval, tmpstr) == 1)
{
CD->RelMin = (int)tmpval[0];
switch (CD->RelMin)
{
case 0:
fprintf(stderr,
" Using absolute mineral volume fraction. \n");
break;
case 1:
fprintf(stderr,
" Using relative mineral volume fraction. \n");
break;
}
}
if (keymatch(line, "effads", tmpval, tmpstr) == 1)
{
CD->EffAds = (int)tmpval[0];
switch (CD->EffAds)
{
case 0:
fprintf(stderr, " Using the normal adsorption model. \n");
break;
case 1:
fprintf(stderr,
" Using the coupled MIM and adsorption model. \n");
break;
/* under construction. */
}
}
if (keymatch(line, "transport_only", tmpval, tmpstr) == 1)
{
CD->RecFlg = (int)tmpval[0];
switch (CD->RecFlg)
{
case 0:
fprintf(stderr, " Transport only mode disabled.\n");
break;
case 1:
fprintf(stderr, " Transport only mode enabled. \n");
break;
/* under construction. */
}
}
if (keymatch(line, "precipitation", tmpval, tmpstr) == 1)
{
CD->PrpFlg = (int)tmpval[0];
switch (CD->PrpFlg)
{
case 0:
fprintf(stderr, " No precipitation condition. \n");
break;
case 1:
fprintf(stderr,
" Precipitation condition is to be specified. \n");
break;
case 2:
fprintf(stderr,
" Precipitation condition is specified via file *.prep. \n");
break;
/* under construction. */
}
}
if (keymatch(line, "RT_delay", tmpval, tmpstr) == 1)
{
CD->Delay = (int)tmpval[0];
fprintf(stderr,
" Flux-PIHM-RT will start after running PIHM for %d days. \n",
CD->Delay);
CD->Delay *= UNIT_C;
/* under construction. */
}
if (keymatch(line, "Condensation", tmpval, tmpstr) == 1)
{
CD->Condensation = tmpval[0];
fprintf(stderr,
" The concentrations of infiltrating rainfall is set to be %f times of concentrations in precipitation. \n",
CD->Condensation);
/* under construction. */
//CD->Condensation *= CS->Cal.Prep_conc; // 09.25 temporal comment-out
fprintf(stderr,
" The concentrations of infiltrating rainfall is set to be %f times of concentrations in precipitation. \n",
CD->Condensation);
}
if (keymatch(line, "AvgScl", tmpval, tmpstr) == 1)
{
CD->React_delay = tmpval[0];
fprintf(stderr,
" Averaging window for asynchronous reaction %d. \n",
CD->React_delay);
/* under construction. */
}
if (keymatch(line, "SUFEFF", tmpval, tmpstr) == 1)
{
CD->SUFEFF = tmpval[0];
fprintf(stderr, " Effective surface area mode set to %d. \n\n",
CD->SUFEFF);
/* under construction. */
}
if (keymatch(line, "Mobile_exchange", tmpval, tmpstr) == 1)
{
CD->TimRiv = tmpval[0];
fprintf(stderr, " Ratio of immobile ion exchange site %f. \n",
CD->TimRiv);
/* under construction. */
}
if (keymatch(line, "Connectivity_threshold", tmpval, tmpstr) == 1)
{
CD->CnntVelo = tmpval[0];
fprintf(stderr,
" Minimum velocity to be deemed as connected is %f m/d. \n",
CD->CnntVelo);
/* under construction. */
}
}
/* OUTPUT block */
fprintf(stderr, "\n Reading '%s.chem' OUTPUT: \n", filename);
rewind(chemfile);
fgets(line, line_width, chemfile);
while ((keymatch(line, "OUTPUT", tmpval, tmpstr) != 1) && (!feof(chemfile)))
fgets(line, line_width, chemfile);
CD->NumBTC = tmpval[0];
fprintf(stderr, " %d breakthrough points specified. \n", CD->NumBTC);
CD->BTC_loc = (int *)malloc(CD->NumBTC * sizeof(int));
i = 0;
fprintf(stderr, " --");
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
fgets(line, line_width, chemfile);
if (keymatch(line, " ", tmpval, tmpstr) != 2)
{
CD->BTC_loc[i] = (int)tmpval[0] - 1;
fprintf(stderr, " Grid %d ", CD->BTC_loc[i] + 1);
i++;
}
if (i >= CD->NumBTC)
break;
}
fprintf(stderr, "are breakthrough points.\n\n");
/* GLOBAL block */
fprintf(stderr, " Reading '%s.chem' GLOBAL: \n", filename);
species Global_type;
Global_type.ChemName = (char *)malloc(WORD_WIDTH * sizeof(char));
strcpy(Global_type.ChemName, "GLOBAL");
rewind(chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "GLOBAL", tmpval, tmpstr) != 1)
fgets(line, line_width, chemfile);
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
fgets(line, line_width, chemfile);
if (keymatch(line, "t_species", tmpval, tmpstr) == 1)
{
CD->NumStc = (int)tmpval[0];
fprintf(stderr, " %d chemical species specified. \n", CD->NumStc);
/* H2O is always a primary species */
}
if (keymatch(line, "s_species", tmpval, tmpstr) == 1)
{
CD->NumSsc = (int)tmpval[0];
fprintf(stderr, " %d secondary species specified. \n",
(int)tmpval[0]);
}
if (keymatch(line, "minerals", tmpval, tmpstr) == 1)
{
CD->NumMin = (int)tmpval[0];
fprintf(stderr, " %d minerals specified. \n", CD->NumMin);
}
if (keymatch(line, "adsorption", tmpval, tmpstr) == 1)
{
CD->NumAds = (int)tmpval[0];
fprintf(stderr, " %d surface complexation specified. \n",
CD->NumAds);
}
if (keymatch(line, "cation_exchange", tmpval, tmpstr) == 1)
{
CD->NumCex = (int)tmpval[0];
fprintf(stderr, " %d cation exchange specified. \n", CD->NumCex);
}
if (keymatch(line, "mineral_kinetic", tmpval, tmpstr) == 1)
{
CD->NumMkr = (int)tmpval[0];
fprintf(stderr, " %d mineral kinetic reaction(s) specified. \n",
CD->NumMkr);
}
if (keymatch(line, "aqueous_kinetic", tmpval, tmpstr) == 1)
{
CD->NumAkr = (int)tmpval[0];
fprintf(stderr, " %d aqueous kinetic reaction(s) specified. \n",
CD->NumAkr);
}
if (keymatch(line, "diffusion", tmpval, tmpstr) == 1)
{
fprintf(stderr, " Diffusion coefficient = %g [cm2/s] \n",
tmpval[0]);
Global_type.DiffCoe = tmpval[0] * 60.0 * 60.0 * 24.0 / 10000.0;
Global_diff = 1;
/* Require unit conversion ! */
}
if (keymatch(line, "dispersion", tmpval, tmpstr) == 1)
{
fprintf(stderr, " Dispersion coefficient = %2.2f [m] \n",
tmpval[0]);
Global_type.DispCoe = tmpval[0];
Global_disp = 1;
/* Set global flags to indicate the global values are present */
}
if (keymatch(line, "cementation", tmpval, tmpstr) == 1)
{
fprintf(stderr, " Cementation factor = %2.1f \n", tmpval[0]);
CD->Cementation = tmpval[0];
}
if (keymatch(line, "temperature", tmpval, tmpstr) == 1)
{
CD->Temperature = tmpval[0];
fprintf(stderr, " Temperature = %3.1f \n\n", CD->Temperature);
}
}
/* The number of species that are mobile, later used in the OS3D subroutine */
CD->NumSpc = CD->NumStc - (CD->NumMin + CD->NumAds + CD->NumCex);
/* The number of species that others depend on */
CD->NumSdc = CD->NumStc - CD->NumMin;
CD->Dependency = (double **)malloc(CD->NumSsc * sizeof(double *));
for (i = 0; i < CD->NumSsc; i++)
{
CD->Dependency[i] = (double *)malloc(CD->NumSdc * sizeof(double));
/* Convert secondary species as an expression of primary species */
for (j = 0; j < CD->NumSdc; j++)
CD->Dependency[i][j] = 0.0;
}
CD->Dep_kinetic =
(double **)malloc((CD->NumMkr + CD->NumAkr) * sizeof(double *));
for (i = 0; i < CD->NumMkr + CD->NumAkr; i++)
{
CD->Dep_kinetic[i] = (double *)malloc(CD->NumStc * sizeof(double));
/* Express kinetic species as function of primary species */
for (j = 0; j < CD->NumStc; j++)
CD->Dep_kinetic[i][j] = 0.0;
}
CD->Dep_kinetic_all = (double **)malloc((CD->NumMin) * sizeof(double *));
for (i = 0; i < CD->NumMin; i++)
{
CD->Dep_kinetic_all[i] = (double *)malloc(CD->NumStc * sizeof(double));
/* Dependencies of minearls, all */
for (j = 0; j < CD->NumStc; j++)
CD->Dep_kinetic_all[i][j] = 0.0;
}
/* Keqs of equilibrium/ kinetic and kinetic all */
CD->Keq = (double *)malloc(CD->NumSsc * sizeof(double));
CD->KeqKinect =
(double *)malloc((CD->NumMkr + CD->NumAkr) * sizeof(double));
CD->KeqKinect_all = (double *)malloc(CD->NumMin * sizeof(double));
/* Convert total concentration as an expression of all species */
CD->Totalconc = (double **)malloc(CD->NumStc * sizeof(double *));
for (i = 0; i < CD->NumStc; i++)
CD->Totalconc[i] =
(double *)malloc((CD->NumStc + CD->NumSsc) * sizeof(double));
#if NOT_YET_IMPLEMENTED
/* Convert total concentration as an expression of all species */
CD->Totalconck = (double **)malloc(CD->NumStc * sizeof(double *));
for (i = 0; i < CD->NumStc; i++)
CD->Totalconck[i] =
(double *)malloc((CD->NumStc + CD->NumSsc) * sizeof(double));
#endif
for (i = 0; i < CD->NumStc; i++)
for (j = 0; j < CD->NumStc + CD->NumSsc; j++)
{
CD->Totalconc[i][j] = 0.0;
#if NOT_YET_IMPLEMENTED
CD->Totalconck[i][j] = 0.0;
#endif
}
/* INITIAL_CONDITIONS block */
fprintf(stderr, " Reading '%s.chem' INITIAL_CONDITIONS: \n", filename);
rewind(chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "INITIAL_CONDITIONS", tmpval, tmpstr) != 1)
fgets(line, line_width, chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
if (keymatch(line, " ", tmpval, tmpstr) != 2)
{
num_conditions++;
}
fgets(line, line_width, chemfile);
}
fprintf(stderr, " %d conditions assigned. \n", num_conditions);
char **chemcon = (char **)malloc(num_conditions * sizeof(char *));
for (i = 0; i < num_conditions; i++)
chemcon[i] = (char *)malloc(word_width * sizeof(char));
char ***con_chem_name =
(char ***)malloc((num_conditions + 1) * sizeof(char **));
for (i = 0; i < num_conditions + 1; i++)
{ /* all conditions + precipitation */
con_chem_name[i] = (char **)malloc(CD->NumStc * sizeof(char *));
for (j = 0; j < CD->NumStc; j++)
con_chem_name[i][j] = (char *)malloc(WORD_WIDTH * sizeof(char));
}
int *condition_index = (int *)malloc(CD->NumVol * sizeof(int));
/* When user assign conditions to blocks, they start from 1 */
for (i = 0; i < CD->NumVol; i++)
{
condition_index[i] = 0;
}
vol_conc *Condition_vcele =
(vol_conc *) malloc(num_conditions * sizeof(vol_conc));
for (i = 0; i < num_conditions; i++)
{
Condition_vcele[i].index = i + 1;
Condition_vcele[i].t_conc =
(double *)malloc(CD->NumStc * sizeof(double));
Condition_vcele[i].p_conc =
(double *)malloc(CD->NumStc * sizeof(double));
Condition_vcele[i].p_para =
(double *)malloc(CD->NumStc * sizeof(double));
Condition_vcele[i].p_type = (int *)malloc(CD->NumStc * sizeof(int));
Condition_vcele[i].s_conc = NULL;
/* We do not input cocentration for secondary speices in rt */
for (j = 0; j < CD->NumStc; j++)
{
Condition_vcele[i].t_conc[j] = ZERO;
Condition_vcele[i].p_conc[j] = ZERO;
}
}
if (CD->PrpFlg)
{
CD->Precipitation.t_conc =
(double *)malloc(CD->NumStc * sizeof(double));
CD->Precipitation.p_conc =
(double *)malloc(CD->NumStc * sizeof(double));
CD->Precipitation.p_para =
(double *)malloc(CD->NumStc * sizeof(double));
CD->Precipitation.p_type = (int *)malloc(CD->NumStc * sizeof(int));
CD->Precipitation.s_conc = NULL;
for (i = 0; i < CD->NumStc; i++)
{
CD->Precipitation.t_conc[i] = ZERO;
CD->Precipitation.p_conc[i] = ZERO;
}
}
CD->chemtype =
(species *) malloc((CD->NumStc + CD->NumSsc) * sizeof(species));
if (CD->chemtype == NULL)
fprintf(stderr, " Memory allocation error\n");
for (i = 0; i < CD->NumStc + CD->NumSsc; i++)
{
if (Global_diff == 1)
CD->chemtype[i].DiffCoe = Global_type.DiffCoe;
/*
* else
* CD->chemtype[i].DiffCoe = ZERO;
*/
/* in squre m per day */
if (Global_disp == 1)
CD->chemtype[i].DispCoe = Global_type.DispCoe;
else
CD->chemtype[i].DispCoe = ZERO;
CD->chemtype[i].ChemName = (char *)malloc(WORD_WIDTH * sizeof(char));
assert(CD->chemtype[i].ChemName != NULL);
memset(CD->chemtype[i].ChemName, 0, WORD_WIDTH);
CD->chemtype[i].Charge = 0.0;
CD->chemtype[i].SizeF = 1.0;
CD->chemtype[i].itype = 0;
}
k = 0;
int initfile = 0;
FILE *cheminitfile = NULL;
rewind(chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "INITIAL_CONDITIONS", tmpval, tmpstr) != 1)
fgets(line, line_width, chemfile);
if (strcmp(tmpstr[1], "FILE") == 0)
{
/* Initialize chemical distribution from file evoked. This will nullify
* all the condition assignment given in the next lines.
* But for now, please keep those lines to let the code work. */
initfile = 1;
fprintf(stderr, " Specifiying the initial chemical distribution from file '%s.cini'. \n", filename);
char *cheminit =
(char *)malloc((strlen(filename) * 2 + 100) * sizeof(char));
sprintf(cheminit, "input/%s/%s.cini", filename, filename);
cheminitfile = fopen(cheminit, "r");
if (cheminitfile == NULL)
{
fprintf(stderr, " Fatal Error: %s.cini does not exist! \n",
filename);
exit(1);
}
else
{
fprintf(stderr, " Reading the '%s.cini'!! \n", filename);
}
free(cheminit); // 10.02
}
fgets(line, line_width, chemfile);
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
if (keymatch(line, " ", tmpval, tmpstr) != 2)
{
strcpy(chemcon[k++], tmpstr[0]);
if (initfile == 0)
{
PIHMprintf(VL_ERROR,
"Assigning initial conditions in .chem file is temporarily"
" disabled. Please use a .cini file.\n");
PIHMexit(EXIT_FAILURE);
}
}
fgets(line, line_width, chemfile);
}
if (initfile == 1)
{
for (i = 0; i < CD->NumVol; i++)
{
fscanf(cheminitfile, "%d %d", &k, &condition_index[i]);
}
}
if (cheminitfile != NULL)
fclose(cheminitfile);
/* CONDITIONS block */
fprintf(stderr, "\n Reading '%s.chem' CONDITIONS: ", filename);
for (i = 0; i < num_conditions; i++)
{
rewind(chemfile);
num_species = 0;
num_mineral = 0;
num_ads = 0;
num_cex = 0;
num_other = 0;
fgets(line, line_width, chemfile);
while ((keymatch(line, "Condition", tmpval, tmpstr) != 1) ||
(keymatch(line, chemcon[i], tmpval, tmpstr) != 1))
fgets(line, line_width, chemfile);
if (strcmp(tmpstr[1], chemcon[i]) == 0)
fprintf(stderr, "\n %s", line);
fgets(line, line_width, chemfile);
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
if (keymatch(line, "NULL", tmpval, tmpstr) != 2)
{
specflg = SpeciationType(database, tmpstr[0]);
if (specflg == AQUEOUS)
{
/* Arrange the concentration of the primary species in such a
* way that all the mobile species are at the beginning. */
num_other = num_mineral + num_ads + num_cex;
Condition_vcele[i].t_conc[num_species - num_other] =
tmpval[0];
strcpy(con_chem_name[i][num_species - num_other],
tmpstr[0]);
fprintf(stderr, " %-28s %g \n",
con_chem_name[i][num_species - num_other], tmpval[0]);
Condition_vcele[i].p_type[num_species - num_other] = AQUEOUS;
}
if (specflg == MINERAL)
{
Condition_vcele[i].t_conc[CD->NumSpc + CD->NumAds +
CD->NumCex + num_mineral] = tmpval[0];
if (strcmp(tmpstr[2], "-ssa") == 0)
Condition_vcele[i].p_para[CD->NumSpc + CD->NumAds +
CD->NumCex + num_mineral] = tmpval[1] * 1.0;
strcpy(con_chem_name[i][CD->NumSpc + CD->NumAds +
CD->NumCex + num_mineral], tmpstr[0]);
fprintf(stderr,
" mineral %-20s %6.4f \t specific surface area \t%6.4f \n",
con_chem_name[i][CD->NumSpc + CD->NumAds + CD->NumCex +
num_mineral], tmpval[0], tmpval[1]);
Condition_vcele[i].p_type[CD->NumSpc + CD->NumAds +
CD->NumCex + num_mineral] = MINERAL;
num_mineral++;
}
if ((tmpstr[0][0] == '>') || (specflg == ADSORPTION))
{
/* Adsorptive sites and species start with > */
/* Condition_vcele[i].t_conc[CD->NumSpc + num_ads] = tmpval[0] * CS->Cal.Site_den; 09.25 temporal comment-out */
Condition_vcele[i].t_conc[CD->NumSpc + num_ads] =
tmpval[0] * 1.0;
Condition_vcele[i].p_type[CD->NumSpc + num_ads] = ADSORPTION;
Condition_vcele[i].p_para[CD->NumSpc + num_ads] = 0;
/* Update when fill in the parameters for adsorption */
strcpy(con_chem_name[i][CD->NumSpc + num_ads], tmpstr[0]);
fprintf(stderr, " surface complex %s\t\t%6.4f \n",
con_chem_name[i][CD->NumSpc + num_ads], tmpval[0]);
num_ads++;
/* under construction */
}
if (specflg == CATION_ECHG)
{
Condition_vcele[i].t_conc[CD->NumSpc + CD->NumAds +
num_cex] = tmpval[0];
Condition_vcele[i].p_type[CD->NumSpc + CD->NumAds +
num_cex] = CATION_ECHG;
Condition_vcele[i].p_para[CD->NumSpc + CD->NumAds +
num_cex] = 0;
/* update when fill in the parameters for cation exchange. */
strcpy(con_chem_name[i][CD->NumSpc + CD->NumAds + num_cex],
tmpstr[0]);
fprintf(stderr, " cation exchange %s\t\t%6.4f \n",
con_chem_name[i][CD->NumSpc + CD->NumAds + num_cex],
tmpval[0]);
num_cex++;
/* under construction */
}
num_species++;
}
fgets(line, line_width, chemfile);
}
}
/* PRECIPITATION block */
fprintf(stderr, "\n Reading '%s.chem' PRECIPITATION: ", filename);
if (CD->PrpFlg)
{
rewind(chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "PRECIPITATION", tmpval, tmpstr) != 1)
fgets(line, line_width, chemfile);
fgets(line, line_width, chemfile);
fprintf(stderr, " \n");
fprintf(stderr, " ---------------------------------\n");
fprintf(stderr, " The condition of precipitation is \n");
fprintf(stderr, " ---------------------------------\n");
num_species = 0;
num_mineral = 0;
num_ads = 0;
num_cex = 0;
num_other = 0;
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
if (keymatch(line, "NULL", tmpval, tmpstr) != 2)
{
specflg = SpeciationType(database, tmpstr[0]);
if (specflg == AQUEOUS)
{
num_other = num_mineral + num_ads + num_cex;
CD->Precipitation.t_conc[num_species - num_other] =
tmpval[0];
strcpy(con_chem_name[num_conditions][num_species -
num_other], tmpstr[0]);
fprintf(stderr, " %-28s %g \n",
con_chem_name[num_conditions][num_species - num_other],
tmpval[0]);
CD->Precipitation.p_type[num_species - num_other] = AQUEOUS;
}
/* arrange the concentration of the primary species in such a
* way that all the mobile species are at the beginning. */
if (specflg == MINERAL)
{
CD->Precipitation.t_conc[CD->NumSpc + CD->NumAds +
CD->NumCex + num_mineral] = tmpval[0];
if (strcmp(tmpstr[2], "-ssa") == 0)
CD->Precipitation.p_para[CD->NumSpc + CD->NumAds +
CD->NumCex + num_mineral] = tmpval[1];
strcpy(con_chem_name[num_conditions][CD->NumSpc +
CD->NumAds + CD->NumCex + num_mineral], tmpstr[0]);
fprintf(stderr,
" mineral %-20s %6.4f \t specific surface area %6.4f\n",
con_chem_name[num_conditions][CD->NumSpc + CD->NumAds +
CD->NumCex + num_mineral], tmpval[0], tmpval[1]);
CD->Precipitation.p_type[CD->NumSpc + CD->NumAds +
CD->NumCex + num_mineral] = MINERAL;
num_mineral++;
}
if ((tmpstr[0][0] == '>') || (specflg == ADSORPTION))
{
/* adsorptive sites and species start with > */
CD->Precipitation.t_conc[CD->NumSpc + num_ads] =
tmpval[0]; /* this is the site density of the adsorptive species. */
CD->Precipitation.p_type[CD->NumSpc + num_ads] = ADSORPTION;
CD->Precipitation.p_para[CD->NumSpc + num_ads] = 0;
/* Update when fill in the parameters for adsorption. */
strcpy(con_chem_name[num_conditions][CD->NumSpc + num_ads],
tmpstr[0]);
fprintf(stderr, " surface complex %s\t %6.4f\n",
con_chem_name[num_conditions][CD->NumSpc + num_ads],
tmpval[0]);
num_ads++;
/* under construction */
}
if (specflg == CATION_ECHG)
{
CD->Precipitation.t_conc[CD->NumSpc + CD->NumAds +
num_cex] = tmpval[0];
CD->Precipitation.p_type[CD->NumSpc + CD->NumAds +
num_cex] = CATION_ECHG;
CD->Precipitation.p_para[CD->NumSpc + CD->NumAds +
num_cex] = 0;
/* Update when fill in the parameters for cation exchange. */
strcpy(con_chem_name[num_conditions][CD->NumSpc +
CD->NumAds + num_cex], tmpstr[0]);
fprintf(stderr, " cation exchange %s\t %6.4f\n",
con_chem_name[num_conditions][CD->NumSpc + CD->NumAds +
num_cex], tmpval[0]);
num_cex++;
/* under construction */
}
num_species++;
}
fgets(line, line_width, chemfile);
}
}
int check_conditions_num;
if (CD->PrpFlg)
check_conditions_num = num_conditions + 1;
else
check_conditions_num = num_conditions;
if (num_species != CD->NumStc)
fprintf(stderr, " Number of species does not match indicated value!\n");
for (i = 1; i < check_conditions_num; i++)
{
for (j = 0; j < num_species; j++)
{
if (strcmp(con_chem_name[i][j], con_chem_name[i - 1][j]) != 0)
{
fprintf(stderr,
" The order of the chemicals in condition <%s> is incorrect!\n",
chemcon[i - 1]);
}
}
}
/* Primary species table */
fprintf(stderr,
"\n Primary species and their types: [1], aqueous; [2], adsorption; [3], cation exchange; [4], mineral. \n");
/* Number of total species in the rt simulator */
for (i = 0; i < CD->NumStc; i++)
{
strcpy(CD->chemtype[i].ChemName, con_chem_name[0][i]);
CD->chemtype[i].itype = Condition_vcele[0].p_type[i];
fprintf(stderr, " %-20s %10d\n", CD->chemtype[i].ChemName,
CD->chemtype[i].itype);
}
/* Precipitation conc table */
if (CD->PrpFlg)
{
fprintf(stderr, "\n Total concentraions in precipitataion: \n");
for (i = 0; i < CD->NumSpc; i++)
{
if (!strcmp(con_chem_name[num_conditions][i], "pH"))
{
if (CD->Precipitation.t_conc[i] < 7)
{
CD->Precipitation.t_conc[i] =
pow(10, -CD->Precipitation.t_conc[i]);
}
else
{
CD->Precipitation.t_conc[i] =
-pow(10, CD->Precipitation.t_conc[i] - 14);
}
}
/* Change the pH of precipitation into total concentraion of H
* We skip the speciation for rain and assume it is OK to calculate
* this way. */
fprintf(stderr, " %-20s %-10.3g [M] \n",
con_chem_name[num_conditions][i], CD->Precipitation.t_conc[i]);
}
}
/* SECONDARY_SPECIES block */
fprintf(stderr, "\n Reading 'shp.chem' SECONDARY_SPECIES: \n");
fprintf(stderr, " Secondary species specified in the input file: \n");
rewind(chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "SECONDARY_SPECIES", tmpval, tmpstr) != 1)
fgets(line, line_width, chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
if (keymatch(line, "NULL", tmpval, tmpstr) != 2)
{
strcpy(CD->chemtype[num_species++].ChemName, tmpstr[0]);
fprintf(stderr, " %s \n", CD->chemtype[num_species - 1].ChemName);
}
fgets(line, line_width, chemfile);
}
/* MINERALS block */
fprintf(stderr, "\n Reading 'shp.chem' MINERALS: \n");
CD->kinetics =
(Kinetic_Reaction *) malloc(CD->NumMkr * sizeof(Kinetic_Reaction));
for (i = 0; i < CD->NumMkr; i++)
{
for (j = 0; j < MAXDEP; j++)
{
CD->kinetics[i].dep_position[j] = 0;
CD->kinetics[i].monod_position[j] = 0; // 08.19
CD->kinetics[i].inhib_position[j] = 0; // 08.19
}
}
k = 0;
rewind(chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "MINERALS", tmpval, tmpstr) != 1)
fgets(line, line_width, chemfile);
fgets(line, line_width, chemfile);
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
if (keymatch(line, " ", tmpval, tmpstr) != 2)
{
strcpy(CD->kinetics[k].species, tmpstr[0]);
if (strcmp(tmpstr[1], "-label") == 0)
strcpy(CD->kinetics[k].Label, tmpstr[2]);
k++;
}
fgets(line, line_width, chemfile);
}
for (i = 0; i < k; i++)
fprintf(stderr,
" Kinetic reaction on '%s' is specified, label '%s'. \n",
CD->kinetics[i].species, CD->kinetics[i].Label);
/* Precipitation conc read in */
fprintf(stderr, "\n Reading 'shp.prep': \n");
if (CD->PrpFlg == 2)
{
CD->TSD_prepconc = (tsdata_struct *)malloc(sizeof(tsdata_struct));
fscanf(prepconc, "%*s %d %d",
&(CD->TSD_prepconc[0].nspec), &(CD->TSD_prepconc[0].length));
CD->prepconcindex =
(int *)malloc(CD->TSD_prepconc[0].nspec * sizeof(int));
/* The number of primary species must be equal to the number of primary
* species specified before. */
for (i = 0; i < CD->TSD_prepconc[0].nspec; i++)
{
fscanf(prepconc, "%d", &(CD->prepconcindex[i]));
if (CD->prepconcindex[i] > 0)
{
assert(CD->prepconcindex[i] <= CD->NumSpc);
fprintf(stderr,
" Precipitation conc of '%s' is a time series. \n",
CD->chemtype[CD->prepconcindex[i] - 1].ChemName);
}
}
CD->TSD_prepconc[0].ftime =
(int *)malloc((CD->TSD_prepconc[0].length) * sizeof(int));
CD->TSD_prepconc[0].data =
(double **)malloc((CD->TSD_prepconc[0].length) * sizeof(double *));
CD->TSD_prepconc[0].value =
(double *)malloc(CD->TSD_prepconc[0].nspec * sizeof(double));
for (i = 0; i < CD->TSD_prepconc[0].length; i++)
{
CD->TSD_prepconc[0].data[i] =
(double *)malloc(CD->TSD_prepconc[0].nspec * sizeof(double));
NextLine(prepconc, cmdstr, &lno);
ReadTS(cmdstr, &CD->TSD_prepconc[0].ftime[i],
&CD->TSD_prepconc[0].data[i][0], CD->TSD_prepconc[0].nspec);
}
/* Convert pH to H+ concentration */
for (i = 0; i < CD->TSD_prepconc[0].nspec; i++)
{
if (CD->prepconcindex[i] > 0 &&
!strcmp(con_chem_name[num_conditions][CD->prepconcindex[i] - 1],
"pH"))
{
for (k = 0; k < CD->TSD_prepconc[0].length; k++)
{
CD->TSD_prepconc[0].data[k][i] =
(CD->TSD_prepconc[0].data[k][i] < 7.0) ?
pow(10, -CD->TSD_prepconc[0].data[k][i]) :
-pow(10, -CD->TSD_prepconc[0].data[k][i] - 14);
}
break;
}
}
}
/* PUMP block */
CD->CalGwinflux = pihm->cal.gwinflux;
fprintf(stderr, "\n Reading 'shp.chem' PUMP: \n");
rewind(chemfile);
fgets(line, line_width, chemfile);
while ((keymatch(line, "PUMP", tmpval, tmpstr) != 1) && (!feof(chemfile)))
fgets(line, line_width, chemfile);
CD->NumPUMP = tmpval[0];
fprintf(stderr, " %d pumps specified. \n", CD->NumPUMP);
CD->pumps = (Pump *) malloc(CD->NumPUMP * sizeof(Pump));
i = 0;
while (keymatch(line, "END", tmpval, tmpstr) != 1)
{
fgets(line, line_width, chemfile);
if (keymatch(line, " ", tmpval, tmpstr) != 2)
{
CD->pumps[i].Pump_Location = (int)tmpval[0];
CD->pumps[i].Injection_rate = (double)tmpval[1];
CD->pumps[i].Injection_conc = (double)tmpval[2];
CD->pumps[i].flow_rate =
CD->pumps[i].Injection_rate / CD->pumps[i].Injection_conc /
365 * 1E-3;
CD->pumps[i].Name_Species = (char *)malloc(20 * sizeof(char));
strcpy(CD->pumps[i].Name_Species, tmpstr[1]);
// wrap(CD->pumps[i].Name_Species);
CD->pumps[i].Position_Species = -1;
for (j = 0; j < CD->NumStc; j++)
{
if (!strcmp(CD->pumps[i].Name_Species,
CD->chemtype[j].ChemName))
{
CD->pumps[i].Position_Species = j;
}
}
fprintf(stderr,
" -- Rate %g [moles/year] of '%s' (pos: %d) at Grid '%d' with a concentration of %g [M/L]. \n",
CD->pumps[i].Injection_rate, CD->pumps[i].Name_Species,
(CD->pumps[i].Position_Species + 1), CD->pumps[i].Pump_Location,
CD->pumps[i].Injection_conc);
fprintf(stderr, " -- Flow rate is then %g [m3/d]. \n",
CD->pumps[i].flow_rate);
// CD->pumps[i].Injection_rate *= 1E-3 / 365;
/* 02.12 calibration */
CD->pumps[i].Injection_rate =
CD->pumps[i].Injection_rate * CD->CalGwinflux;
CD->pumps[i].flow_rate = CD->pumps[i].flow_rate * CD->CalGwinflux;
fprintf(stderr,
" -- after calibration: injection_rate %g [moles/year], flow _rate %g [m3/d], CD->CalGwinflux = %f. \n",
CD->pumps[i].Injection_rate, CD->pumps[i].flow_rate,
CD->CalGwinflux);
i++;
}
if (i >= CD->NumPUMP)
break;
}
/* End of reading input files */
/* Reading '*.maxwater' input file */
fprintf(stderr, "\n Reading 'coalcreek_952.maxwater': \n");
CD->Vcele = (vol_conc *) malloc(CD->NumVol * sizeof(vol_conc));
for (i = 0; i < CD->NumVol; i++)
{
CD->Vcele[i].maxwater = 0; /* Initialize, including ghost cells */
}
fscanf(maxwater, "%*[^\n]%*c"); /* Jump over the first header line */
for (i = 0; i < nelem; i++) /* GW cells */
{
fscanf(maxwater, "%*d %lf", &(CD->Vcele[RT_GW(i)].maxwater));
CD->Vcele[RT_UNSAT(i)].maxwater = CD->Vcele[RT_GW(i)].maxwater;
}
fclose(maxwater);
/* Initializing volumetric parameters, inherit from PIHM
* That is, if PIHM is started from a hot start, rt is also
* initialized with the hot data */
for (i = 0; i < nelem; i++)
{
/* Initializing volumetrics for groundwater (GW) cells */
CD->Vcele[RT_GW(i)].height_o = pihm->elem[i].ws.gw;
CD->Vcele[RT_GW(i)].height_t = pihm->elem[i].ws.gw;
CD->Vcele[RT_GW(i)].area = pihm->elem[i].topo.area;
CD->Vcele[RT_GW(i)].porosity = pihm->elem[i].soil.smcmax;
CD->Vcele[RT_GW(i)].vol_o = pihm->elem[i].topo.area * pihm->elem[i].ws.gw;
CD->Vcele[RT_GW(i)].vol = pihm->elem[i].topo.area * pihm->elem[i].ws.gw;
CD->Vcele[RT_GW(i)].sat = 1.0;
CD->Vcele[RT_GW(i)].type = GW_VOL;
/* Initializing volumetrics for unsaturated cells */
/* Porosity in PIHM is
* Effective Porosity = Porosity - Residue Water Porosity
* Porosity in RT is total Porosity, therefore, the water height in the
* unsaturated zone needs be converted as well */
CD->Vcele[RT_UNSAT(i)].height_o = (pihm->elem[i].ws.unsat *
(pihm->elem[i].soil.smcmax - pihm->elem[i].soil.smcmin) +
(pihm->elem[i].soil.depth - pihm->elem[i].ws.gw) *
pihm->elem[i].soil.smcmin) / (pihm->elem[i].soil.smcmax);
CD->Vcele[RT_UNSAT(i)].height_t = CD->Vcele[RT_UNSAT(i)].height_o;
CD->Vcele[RT_UNSAT(i)].area = pihm->elem[i].topo.area;
CD->Vcele[RT_UNSAT(i)].porosity = pihm->elem[i].soil.smcmax;
/* Unsaturated zone has the same porosity as saturated zone */
CD->Vcele[RT_UNSAT(i)].sat = CD->Vcele[RT_UNSAT(i)].height_o /
(pihm->elem[i].soil.depth - pihm->elem[i].ws.gw);
CD->Vcele[RT_UNSAT(i)].vol_o = pihm->elem[i].topo.area * CD->Vcele[RT_UNSAT(i)].height_o;
CD->Vcele[RT_UNSAT(i)].vol = pihm->elem[i].topo.area * pihm->elem[i].soil.depth;
CD->Vcele[RT_UNSAT(i)].type = UNSAT_VOL;
/* The saturation of unsaturated zone is the Hu divided by height of
* this cell */
if (CD->Vcele[RT_UNSAT(i)].sat > 1.0)
fprintf(stderr,
"Fatal Error, Unsaturated Zone Initialization For RT Failed!\n");
#if defined(_FBR_)
/* Initializing volumetrics for deep groundwater (FBR GW) cells */
CD->Vcele[RT_FBR_GW(i)].height_o = pihm->elem[i].ws.fbr_gw;
CD->Vcele[RT_FBR_GW(i)].height_t = pihm->elem[i].ws.fbr_gw;
CD->Vcele[RT_FBR_GW(i)].area = pihm->elem[i].topo.area;
CD->Vcele[RT_FBR_GW(i)].porosity = pihm->elem[i].geol.smcmax;
CD->Vcele[RT_FBR_GW(i)].vol_o = pihm->elem[i].topo.area * pihm->elem[i].ws.fbr_gw;
CD->Vcele[RT_FBR_GW(i)].vol = pihm->elem[i].topo.area * pihm->elem[i].ws.fbr_gw;
CD->Vcele[RT_FBR_GW(i)].sat = 1.0;
CD->Vcele[RT_FBR_GW(i)].type = FBR_GW_VOL;
/* Initializing volumetrics for bedrock unsaturated cells */
CD->Vcele[RT_FBR_UNSAT(i)].height_o = (pihm->elem[i].ws.fbr_unsat *
(pihm->elem[i].geol.smcmax - pihm->elem[i].geol.smcmin) +
(pihm->elem[i].geol.depth - pihm->elem[i].ws.fbr_gw) *
pihm->elem[i].geol.smcmin) / (pihm->elem[i].geol.smcmax);
CD->Vcele[RT_FBR_UNSAT(i)].height_t = CD->Vcele[RT_FBR_UNSAT(i)].height_o;
CD->Vcele[RT_FBR_UNSAT(i)].area = pihm->elem[i].topo.area;
CD->Vcele[RT_FBR_UNSAT(i)].porosity = pihm->elem[i].geol.smcmax;
/* Unsaturated zone has the same porosity as saturated zone */
CD->Vcele[RT_FBR_UNSAT(i)].sat = CD->Vcele[RT_FBR_UNSAT(i)].height_o /
(pihm->elem[i].geol.depth - pihm->elem[i].ws.fbr_gw);
CD->Vcele[RT_FBR_UNSAT(i)].vol_o = pihm->elem[i].topo.area * CD->Vcele[RT_FBR_UNSAT(i)].height_o;
CD->Vcele[RT_FBR_UNSAT(i)].vol = pihm->elem[i].topo.area * pihm->elem[i].geol.depth;
CD->Vcele[RT_FBR_UNSAT(i)].type = FBR_UNSAT_VOL;
/* The saturation of unsaturated zone is the Hu divided by height of
* this cell */
if (CD->Vcele[RT_FBR_UNSAT(i)].sat > 1.0)
fprintf(stderr,
"Fatal Error, FBR Unsaturated Zone Initialization For RT Failed!\n");
#endif
}
CD->CalPorosity = pihm->cal.porosity;
CD->CalRate = pihm->cal.rate;
CD->CalSSA = pihm->cal.ssa;
CD->CalPrcpconc = pihm->cal.prcpconc;
CD->CalInitconc = pihm->cal.initconc;
CD->CalXsorption = pihm->cal.Xsorption;
for (i = 0; i < nriver; i++)
{
/* Initializing volumetrics for river cells */
CD->Vcele[RT_RIVER(i)].height_o = pihm->river[i].ws.gw;
CD->Vcele[RT_RIVER(i)].height_t = pihm->river[i].ws.gw;
CD->Vcele[RT_RIVER(i)].area = pihm->river[i].topo.area;
CD->Vcele[RT_RIVER(i)].porosity = 1.0;
CD->Vcele[RT_RIVER(i)].sat = 1.0;
CD->Vcele[RT_RIVER(i)].vol_o = pihm->river[i].topo.area * pihm->river[i].ws.gw;
CD->Vcele[RT_RIVER(i)].vol = pihm->river[i].topo.area * pihm->river[i].ws.gw;
CD->Vcele[RT_RIVER(i)].type = RIVER_VOL;
}
/* Initialize virtual cell */
CD->Vcele[PRCP_VOL - 1].height_o = 0.0;
CD->Vcele[PRCP_VOL - 1].height_t = 0.0;
CD->Vcele[PRCP_VOL - 1].area = 0.0;
CD->Vcele[PRCP_VOL - 1].porosity = 0.0;
CD->Vcele[PRCP_VOL - 1].sat = 0.0;
CD->Vcele[PRCP_VOL - 1].vol_o = 0.0;
CD->Vcele[PRCP_VOL - 1].vol = 0.0;
CD->Vcele[VIRTUAL_VOL - 1].height_o = 1.0;
CD->Vcele[VIRTUAL_VOL - 1].height_t = 1.0;
CD->Vcele[VIRTUAL_VOL - 1].area = 1.0;
CD->Vcele[VIRTUAL_VOL - 1].porosity = 1.0;
CD->Vcele[VIRTUAL_VOL - 1].sat = 1.0;
CD->Vcele[VIRTUAL_VOL - 1].vol_o = 1.0;
CD->Vcele[VIRTUAL_VOL - 1].vol = 1.0;
for (i = 0; i < CD->NumSpc; i++)
{
if (strcmp(CD->chemtype[i].ChemName, "pH") == 0)
{
strcpy(CD->chemtype[i].ChemName, "H+");
speciation_flg = 1;
}
}
/* Initializing concentration distributions */
fprintf(stderr,
"\n Initializing concentration, Vcele [i, 0 ~ NumVol]... \n");
for (i = 0; i < CD->NumVol; i++)
{
CD->Vcele[i].index = i + 1;
CD->Vcele[i].t_conc = (double *)calloc(CD->NumStc, sizeof(double));
CD->Vcele[i].p_conc = (double *)calloc(CD->NumStc, sizeof(double));
CD->Vcele[i].s_conc = (double *)calloc(CD->NumSsc, sizeof(double));
CD->Vcele[i].p_actv = (double *)calloc(CD->NumStc, sizeof(double));
CD->Vcele[i].p_para = (double *)calloc(CD->NumStc, sizeof(double));
CD->Vcele[i].p_type = (int *)calloc(CD->NumStc, sizeof(int));
CD->Vcele[i].log10_pconc = (double *)calloc(CD->NumStc, sizeof(double));
CD->Vcele[i].log10_sconc = (double *)calloc(CD->NumSsc, sizeof(double));
CD->Vcele[i].btcv_pconc = (double *)calloc(CD->NumStc, sizeof(double));
CD->Vcele[i].illness = 0;
for (j = 0; j < CD->NumStc; j++)
{
if ((speciation_flg == 1) &&
(strcmp(CD->chemtype[j].ChemName, "H+") == 0))
{
CD->Vcele[i].p_conc[j] = pow(10,
-(Condition_vcele[condition_index[i] - 1].t_conc[j]));
CD->Vcele[i].t_conc[j] = CD->Vcele[i].p_conc[j];
CD->Vcele[i].p_actv[j] = CD->Vcele[i].p_conc[j];
CD->Vcele[i].t_conc[j] = CD->Vcele[i].p_conc[j];
CD->Vcele[i].p_type[j] = 1;
}
else if (CD->chemtype[j].itype == MINERAL)
{
CD->Vcele[i].t_conc[j] =
Condition_vcele[condition_index[i] - 1].t_conc[j];
CD->Vcele[i].p_conc[j] = CD->Vcele[i].t_conc[j];
CD->Vcele[i].p_actv[j] = 1.0;
CD->Vcele[i].p_para[j] =
Condition_vcele[condition_index[i] - 1].p_para[j];
CD->Vcele[i].p_type[j] =
Condition_vcele[condition_index[i] - 1].p_type[j];
}
else
{
if (strcmp(CD->chemtype[j].ChemName, "DOC") == 0)
{
CD->Vcele[i].t_conc[j] = CD->CalInitconc *
Condition_vcele[condition_index[i] - 1].t_conc[j];
}
else
{
CD->Vcele[i].t_conc[j] =
Condition_vcele[condition_index[i] - 1].t_conc[j];
}
CD->Vcele[i].p_conc[j] = CD->Vcele[i].t_conc[j] * 0.5;
CD->Vcele[i].p_actv[j] = CD->Vcele[i].p_conc[j];
CD->Vcele[i].p_para[j] =
Condition_vcele[condition_index[i] - 1].p_para[j];
CD->Vcele[i].p_type[j] =
Condition_vcele[condition_index[i] - 1].p_type[j];
}
}
for (j = 0; j < CD->NumSsc; j++)
{
CD->Vcele[i].s_conc[j] = ZERO;
}
}
/*
* Beginning configuring the connectivity for flux
*/
for (i = 0; i < nelem; i++)
{
total_area += pihm->elem[i].topo.area;
}
CD->NumFac = NUM_EDGE * nelem * 2 + 3 * nelem + 6 * nriver;
CD->NumDis = 2 * 3 * nelem + 3 * nelem;
fprintf(stderr, "\n Total area of the watershed is %f [m^2]. \n",
total_area);
for (i = 0; i < CD->NumPUMP; i++)
{
CD->pumps[i].flow_rate = CD->pumps[i].flow_rate;
fprintf(stderr, "\n PUMP rate is specified %g [m^3/d]. \n",
CD->pumps[i].flow_rate);
}
/* Configuring the lateral connectivity of GW grid blocks */
fprintf(stderr,
"\n Configuring the lateral connectivity of GW grid blocks... \n");
CD->Flux = (face *) malloc(CD->NumFac * sizeof(face));
for (i = 0; i < nelem; i++)
{
int elemlo;
int elemuu;
int elemll;
double distance;
for (j = 0; j < 3; j++)
{
if (pihm->elem[i].nabr[j] != NO_FLOW)
{
elemlo = pihm->elem[i].nabr[j];
elemuu = (pihm->elem[i].nabr[j] > 0) ?
upstream(pihm->elem[i],
pihm->elem[pihm->elem[i].nabr[j] - 1], pihm) : 0;
elemll = (pihm->elem[i].nabr[j] > 0) ?
upstream(pihm->elem[pihm->elem[i].nabr[j] - 1],
pihm->elem[i], pihm) : 0;
distance = Dist2Edge(&pihm->meshtbl, &pihm->elem[i], j);
/* Initialize GW fluxes */
CD->Flux[RT_LAT_GW(i, j)].nodeup = CD->Vcele[RT_GW(i)].index;
CD->Flux[RT_LAT_GW(i, j)].node_trib = 0;
CD->Flux[RT_LAT_GW(i, j)].nodelo = (elemlo > 0) ?
CD->Vcele[RT_GW(elemlo - 1)].index :
CD->Vcele[RT_RIVER(-elemlo - 1)].index;
CD->Flux[RT_LAT_GW(i, j)].nodeuu = (elemuu > 0) ?
CD->Vcele[RT_GW(elemuu - 1)].index : 0;
CD->Flux[RT_LAT_GW(i, j)].nodell = (elemll > 0) ?
CD->Vcele[RT_GW(elemll - 1)].index : 0;
CD->Flux[RT_LAT_GW(i, j)].flux_trib = 0.0;
CD->Flux[RT_LAT_GW(i, j)].BC = DISPERSION;
CD->Flux[RT_LAT_GW(i, j)].distance = distance;
/* Initialize unsat zone fluxes */
CD->Flux[RT_LAT_UNSAT(i, j)].nodeup = CD->Vcele[RT_UNSAT(i)].index;
CD->Flux[RT_LAT_UNSAT(i, j)].node_trib = 0;
CD->Flux[RT_LAT_UNSAT(i, j)].nodelo = (elemlo > 0) ?
CD->Vcele[RT_UNSAT(elemlo - 1)].index :
CD->Vcele[RT_RIVER(-elemlo - 1)].index;
CD->Flux[RT_LAT_UNSAT(i, j)].nodeuu = (elemuu > 0) ?
CD->Vcele[RT_UNSAT(elemuu - 1)].index :0;
CD->Flux[RT_LAT_UNSAT(i, j)].nodell = (elemll > 0) ?
CD->Vcele[RT_UNSAT(elemll - 1)].index : 0;
CD->Flux[RT_LAT_UNSAT(i, j)].flux_trib = 0.0;
CD->Flux[RT_LAT_UNSAT(i, j)].BC = DISPERSION;
CD->Flux[RT_LAT_UNSAT(i, j)].distance = distance;
}
else
{
CD->Flux[RT_LAT_GW(i, j)].nodeup = CD->Vcele[RT_GW(i)].index;
CD->Flux[RT_LAT_GW(i, j)].node_trib = 0;
CD->Flux[RT_LAT_GW(i, j)].nodelo = 0;
CD->Flux[RT_LAT_GW(i, j)].nodeuu = 0;
CD->Flux[RT_LAT_GW(i, j)].nodell = 0;
CD->Flux[RT_LAT_GW(i, j)].flux_trib = 0.0;
CD->Flux[RT_LAT_GW(i, j)].BC = NO_FLOW;
CD->Flux[RT_LAT_GW(i, j)].distance = 0.0;
CD->Flux[RT_LAT_UNSAT(i, j)].nodeup = CD->Vcele[RT_UNSAT(i)].index;;
CD->Flux[RT_LAT_UNSAT(i, j)].node_trib = 0;
CD->Flux[RT_LAT_UNSAT(i, j)].nodelo = 0;
CD->Flux[RT_LAT_UNSAT(i, j)].nodeuu = 0;
CD->Flux[RT_LAT_UNSAT(i, j)].nodell = 0;
CD->Flux[RT_LAT_UNSAT(i, j)].flux_trib = 0.0;
CD->Flux[RT_LAT_UNSAT(i, j)].BC = NO_FLOW;
CD->Flux[RT_LAT_UNSAT(i, j)].distance = 0.0;
}
}
/* Infiltration */
CD->Flux[RT_INFIL(i)].nodeup = CD->Vcele[RT_UNSAT(i)].index;
CD->Flux[RT_INFIL(i)].node_trib = 0;
CD->Flux[RT_INFIL(i)].nodelo = PRCP_VOL;
CD->Flux[RT_INFIL(i)].nodeuu = 0;
CD->Flux[RT_INFIL(i)].nodell = 0;
CD->Flux[RT_INFIL(i)].flux_trib = 0.0;
CD->Flux[RT_INFIL(i)].BC = NO_DISP;
CD->Flux[RT_INFIL(i)].distance = 0.0;
/* Rechage centered at unsat blocks */
CD->Flux[RT_RECHG_UNSAT(i)].nodeup = CD->Vcele[RT_UNSAT(i)].index;
CD->Flux[RT_RECHG_UNSAT(i)].node_trib = 0;
CD->Flux[RT_RECHG_UNSAT(i)].nodelo = CD->Vcele[RT_GW(i)].index;
CD->Flux[RT_RECHG_UNSAT(i)].nodeuu = 0;
CD->Flux[RT_RECHG_UNSAT(i)].nodell = 0;
CD->Flux[RT_RECHG_UNSAT(i)].flux_trib = 0.0;
CD->Flux[RT_RECHG_UNSAT(i)].BC = DISPERSION;
CD->Flux[RT_RECHG_UNSAT(i)].distance = 0.1 * pihm->elem[i].soil.depth;
/* Recharge centered at gw blocks */
CD->Flux[RT_RECHG_GW(i)].nodeup = CD->Vcele[RT_GW(i)].index;
CD->Flux[RT_RECHG_GW(i)].node_trib = 0;
CD->Flux[RT_RECHG_GW(i)].nodelo = CD->Vcele[RT_UNSAT(i)].index;
CD->Flux[RT_RECHG_GW(i)].nodeuu = 0;
CD->Flux[RT_RECHG_GW(i)].nodell = 0;
CD->Flux[RT_RECHG_GW(i)].flux_trib = 0.0;
CD->Flux[RT_RECHG_GW(i)].BC = DISPERSION;
CD->Flux[RT_RECHG_GW(i)].distance = 0.1 * pihm->elem[i].soil.depth;
}
/* Configuring the vertical connectivity of UNSAT - GW blocks */
fprintf(stderr,
"\n Configuring the vertical connectivity of UNSAT - GW grid blocks... \n");
/* Configuring the connectivity of RIVER and EBR blocks */
fprintf(stderr,
"\n Configuring the connectivity of RIVER & EBR grid blocks... \n");
for (i = 0; i < nriver; i++)
{
/* Between River and Left */
/* River to left OFL 2 */
CD->Flux[RT_LEFT_SURF2RIVER(i)].nodeup = CD->Vcele[RT_RIVER(i)].index;
CD->Flux[RT_LEFT_SURF2RIVER(i)].node_trib = 0;
CD->Flux[RT_LEFT_SURF2RIVER(i)].nodelo = VIRTUAL_VOL;
CD->Flux[RT_LEFT_SURF2RIVER(i)].nodeuu = 0;
CD->Flux[RT_LEFT_SURF2RIVER(i)].nodell = 0;
CD->Flux[RT_LEFT_SURF2RIVER(i)].BC = NO_DISP;
CD->Flux[RT_LEFT_SURF2RIVER(i)].flux = 0.0;
CD->Flux[RT_LEFT_SURF2RIVER(i)].flux_trib = 0.0;
CD->Flux[RT_LEFT_SURF2RIVER(i)].distance = 1.0;
CD->Flux[RT_LEFT_SURF2RIVER(i)].s_area = 0.0;
/* Between River and Right */
/* River to right OFL 3 */
CD->Flux[RT_RIGHT_SURF2RIVER(i)].nodeup = CD->Vcele[RT_RIVER(i)].index;
CD->Flux[RT_RIGHT_SURF2RIVER(i)].node_trib = 0;
CD->Flux[RT_RIGHT_SURF2RIVER(i)].nodelo = VIRTUAL_VOL;
CD->Flux[RT_RIGHT_SURF2RIVER(i)].nodeuu = 0;
CD->Flux[RT_RIGHT_SURF2RIVER(i)].nodell = 0;
CD->Flux[RT_RIGHT_SURF2RIVER(i)].BC = NO_DISP;
CD->Flux[RT_RIGHT_SURF2RIVER(i)].flux = 0.0;
CD->Flux[RT_RIGHT_SURF2RIVER(i)].flux_trib = 0.0;
CD->Flux[RT_RIGHT_SURF2RIVER(i)].distance = 1.0;
CD->Flux[RT_RIGHT_SURF2RIVER(i)].s_area = 0.0;
/* Between Left and EBR */
/* EBR to left 7 + 4 */
CD->Flux[RT_LEFT_AQIF2RIVER(i)].nodeup = CD->Vcele[RT_RIVER(i)].index;
CD->Flux[RT_LEFT_AQIF2RIVER(i)].node_trib = 0;
CD->Flux[RT_LEFT_AQIF2RIVER(i)].nodelo =
CD->Vcele[RT_GW(pihm->river[i].leftele - 1)].index;
CD->Flux[RT_LEFT_AQIF2RIVER(i)].nodeuu = 0;
CD->Flux[RT_LEFT_AQIF2RIVER(i)].nodell = 0;
CD->Flux[RT_LEFT_AQIF2RIVER(i)].BC = DISPERSION;
CD->Flux[RT_LEFT_AQIF2RIVER(i)].flux = 0.0;
CD->Flux[RT_LEFT_AQIF2RIVER(i)].flux_trib = 0.0;
for (j = 0; j < NUM_EDGE; j++)
{
if (-pihm->elem[pihm->river[i].leftele - 1].nabr[j] == i + 1)
{
CD->Flux[RT_LEFT_AQIF2RIVER(i)].distance =
CD->Flux[RT_LAT_GW(pihm->river[i].leftele - 1, j)].distance;
break;
}
}
CD->Flux[RT_LEFT_AQIF2RIVER(i)].s_area = pihm->river[i].shp.length *
pihm->elem[pihm->river[i].leftele - 1].soil.depth;
/* Between Right and EBR */
/* EBR to right 8 + 5 */
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].nodeup = CD->Vcele[RT_RIVER(i)].index;
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].node_trib = 0;
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].nodelo =
CD->Vcele[RT_GW(pihm->river[i].rightele - 1)].index;
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].nodeuu = 0;
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].nodell = 0;
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].BC = DISPERSION;
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].flux = 0.0;
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].flux_trib = 0.0;
for (j = 0; j < NUM_EDGE; j++)
{
if (-pihm->elem[pihm->river[i].rightele - 1].nabr[j] == i + 1)
{
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].distance =
CD->Flux[RT_LAT_GW(pihm->river[i].rightele - 1, j)].distance;
break;
}
}
CD->Flux[RT_RIGHT_AQIF2RIVER(i)].s_area =
pihm->river[i].shp.length *
pihm->elem[pihm->river[i].rightele - 1].soil.depth;
/* Between EBR */
/* To downstream EBR 9 */
CD->Flux[RT_DOWN_RIVER2RIVER(i)].nodeup = CD->Vcele[RT_RIVER(i)].index;
CD->Flux[RT_DOWN_RIVER2RIVER(i)].nodelo = (pihm->river[i].down < 0) ?
VIRTUAL_VOL : CD->Vcele[RT_RIVER(pihm->river[i].down - 1)].index;
CD->Flux[RT_DOWN_RIVER2RIVER(i)].node_trib = 0;
CD->Flux[RT_DOWN_RIVER2RIVER(i)].nodeuu = 0;
CD->Flux[RT_DOWN_RIVER2RIVER(i)].nodell = 0;
CD->Flux[RT_DOWN_RIVER2RIVER(i)].BC = NO_DISP;
CD->Flux[RT_DOWN_RIVER2RIVER(i)].flux = 0.0;
CD->Flux[RT_DOWN_RIVER2RIVER(i)].flux_trib = 0.0;
CD->Flux[RT_DOWN_RIVER2RIVER(i)].distance = 1.0;
CD->Flux[RT_DOWN_RIVER2RIVER(i)].s_area = 0.0;
/* From upstream EBR 10 */
CD->Flux[RT_UP_RIVER2RIVER(i)].nodeup = CD->Vcele[RT_RIVER(i)].index;
CD->Flux[RT_UP_RIVER2RIVER(i)].nodelo = (pihm->river[i].up[0] < 0) ?
VIRTUAL_VOL : CD->Vcele[RT_RIVER(pihm->river[i].up[0] - 1)].index;
CD->Flux[RT_UP_RIVER2RIVER(i)].node_trib = (pihm->river[i].up[1] < 0) ?
pihm->river[i].up[1] :
CD->Vcele[RT_RIVER(pihm->river[i].up[1] - 1)].index;
CD->Flux[RT_UP_RIVER2RIVER(i)].nodeuu = 0;
CD->Flux[RT_UP_RIVER2RIVER(i)].nodell = 0;
CD->Flux[RT_UP_RIVER2RIVER(i)].BC = NO_DISP;
CD->Flux[RT_UP_RIVER2RIVER(i)].flux = 0.0;
CD->Flux[RT_UP_RIVER2RIVER(i)].flux_trib = 0.0;
CD->Flux[RT_UP_RIVER2RIVER(i)].distance = 1.0;
CD->Flux[RT_UP_RIVER2RIVER(i)].s_area = 0.0;
}
for (k = 0; k < CD->NumFac; k++)
{
CD->Flux[k].velocity = 0.0;
CD->Flux[k].flux = 0.0; /* Initialize 0.0 for above sections of GW-GW,
* UNSAT-UNSAT, GW-UNSAT, UNSAT-GW */
CD->Flux[k].flux_trib = 0.0;
CD->Flux[k].s_area = 0.0;
}
CD->SPCFlg = speciation_flg;
Lookup(database, CD);
/* Update the concentration of mineral after get the molar volume of
* mineral */
double Cal_PCO2 = 1.0;
double Cal_Keq = 1.0;
for (i = 0; i < CD->NumAkr + CD->NumMkr; i++)
{
if (!strcmp(CD->chemtype[i + CD->NumSpc + CD->NumAds +
CD->NumCex].ChemName, "'CO2(*g)'"))
{
CD->KeqKinect[i] += log10(Cal_PCO2);
}
else
{
CD->KeqKinect[i] += log10(Cal_Keq);
}
}
fprintf(stderr, "\n Kinetic Mass Matrx (calibrated Keq)! \n");
fprintf(stderr, "%-15s", " ");
for (i = 0; i < CD->NumStc; i++)
fprintf(stderr, "%-14s", CD->chemtype[i].ChemName);
fprintf(stderr, "\n");
for (j = 0; j < CD->NumMkr + CD->NumAkr; j++)
{
fprintf(stderr, " %-14s",
CD->chemtype[j + CD->NumSpc + CD->NumAds + CD->NumCex].ChemName);
for (i = 0; i < CD->NumStc; i++)
{
fprintf(stderr, "%-14.2f", CD->Dep_kinetic[j][i]);
}
fprintf(stderr, " Keq = %-6.2f\n", CD->KeqKinect[j]);
}
fprintf(stderr, "\n");
/* Use calibration coefficient to produce new Keq values for
* 1) CO2, 2) other kinetic reaction */
fprintf(stderr,
" \n Mass action species type determination (0: immobile, 1: mobile, 2: Mixed) \n");
for (i = 0; i < CD->NumSpc; i++)
{
if (CD->chemtype[i].itype == AQUEOUS)
CD->chemtype[i].mtype = 1;
else
CD->chemtype[i].mtype = 0;
for (j = 0; j < CD->NumStc + CD->NumSsc; j++)
{
if ((CD->Totalconc[i][j] != 0) &&
(CD->chemtype[j].itype != CD->chemtype[i].mtype))
CD->chemtype[i].mtype = 2;
}
/*
* if (strcmp( CD->chemtype[i].ChemName, "'H+'") == 0)
* CD->chemtype[i].mtype = 1;
*/
fprintf(stderr, " %12s\t%10d\n", CD->chemtype[i].ChemName,
CD->chemtype[i].mtype);
}
fprintf(stderr,
" \n Individual species type determination (1: aqueous, 2: adsorption, 3: ion exchange, 4: solid) \n");
for (i = 0; i < CD->NumStc + CD->NumSsc; i++)
{
fprintf(stderr, " %12s\t%10d\n", CD->chemtype[i].ChemName,
CD->chemtype[i].itype);
}
for (i = 0; i < CD->NumVol; i++)
{
for (j = 0; j < CD->NumStc; j++)
{
if (CD->chemtype[j].itype == MINERAL)
{
if (CD->RelMin == 0)
{
/* Absolute mineral volume fraction */
CD->Vcele[i].t_conc[j] =
CD->Vcele[i].t_conc[j] * 1000 /
CD->chemtype[j].MolarVolume / CD->Vcele[i].porosity;
CD->Vcele[i].p_conc[j] = CD->Vcele[i].t_conc[j];
}
if (CD->RelMin == 1)
{
/* Relative mineral volume fraction */
/* Porosity can be 1.0 so the relative fraction option needs
* a small modification */
CD->Vcele[i].t_conc[j] = CD->Vcele[i].t_conc[j] *
(1 - CD->Vcele[i].porosity + INFTYSMALL) * 1000 /
CD->chemtype[j].MolarVolume / CD->Vcele[i].porosity;
CD->Vcele[i].p_conc[j] = CD->Vcele[i].t_conc[j];
}
}
if ((CD->chemtype[j].itype == CATION_ECHG) ||
(CD->chemtype[j].itype == ADSORPTION))
{
/* Change the unit of CEC (eq/g) into C(ion site)
* (eq/L porous space), assuming density of solid is always
* 2650 g/L solid */
CD->Vcele[i].t_conc[j] =
CD->Vcele[i].t_conc[j] * (1 - CD->Vcele[i].porosity) * 2650;
CD->Vcele[i].p_conc[j] = CD->Vcele[i].t_conc[j];
}
}
}
CD->SPCFlg = 1;
if (!CD->RecFlg)
{
for (i = 0; i < nelem; i++)
{
Speciation(CD, RT_GW(i));
}
}
CD->SPCFlg = 0;
/* Initialize unsaturated zone concentrations to be the same as in saturated
* zone */
for (i = 0; i < nelem; i++)
{
for (k = 0; k < CD->NumStc; k++)
{
CD->Vcele[RT_UNSAT(i)].t_conc[k] = CD->Vcele[RT_GW(i)].t_conc[k];
CD->Vcele[RT_UNSAT(i)].p_conc[k] = CD->Vcele[RT_GW(i)].p_conc[k];
CD->Vcele[RT_UNSAT(i)].p_actv[k] = CD->Vcele[RT_GW(i)].p_actv[k];
}
}
/* Initialize river concentrations */
for (i = 0; i < nriver; i++)
{
for (k = 0; k < CD->NumStc; k++)
{
if (CD->chemtype[k].itype != AQUEOUS)
{
CD->Vcele[RT_RIVER(i)].t_conc[k] = 1.0E-20;
CD->Vcele[RT_RIVER(i)].p_conc[k] = 1.0E-20;
CD->Vcele[RT_RIVER(i)].p_actv[k] = 1.0E-20;
}
}
}
for (i = 0; i < num_conditions; i++)
{
free(Condition_vcele[i].t_conc);
free(Condition_vcele[i].p_conc);
free(Condition_vcele[i].p_para);
free(Condition_vcele[i].p_type);
}
free(Condition_vcele);
for (i = 0; i < num_conditions; i++)
free(chemcon[i]);
free(chemcon);
for (i = 0; i < num_conditions + 1; i++)
{
for (j = 0; j < CD->NumStc; j++)
free(con_chem_name[i][j]);
free(con_chem_name[i]);
}
free(con_chem_name);
free(chemfn);
free(datafn);
free(forcfn);
free(condition_index);
free(Global_type.ChemName);
for (i = 0; i < words_line; i++)
{
free(tmpstr[i]);
}
free(tmpstr);
fclose(chemfile);
fclose(database);
fclose(prepconc);
}
void fluxtrans(int t, int stepsize, const pihm_struct pihm, Chem_Data CD,
    double *t_duration_transp, double *t_duration_react)
{
    /*
     * Map PIHM hydrologic fluxes and storages onto the RT (reactive
     * transport) grid.  Fluxes are accumulated every hydrologic step; once
     * every averaging cycle (CD->AvgScl hydrologic steps) the cell
     * volumetrics are updated, flux contact areas and velocities are
     * computed, and transport/reaction are advanced via AdptTime().
     *
     * t                  current model time [min]
     * stepsize           hydrologic time step [min]
     * pihm               PIHM model state (read-only here)
     * CD                 RT model state, updated in place
     * t_duration_transp  accumulated wall time spent in transport (out)
     * t_duration_react   accumulated wall time spent in reaction (out)
     *
     * Original header note:
     * swi irreducible water saturation
     * hn non mobile water height
     * ht transient zone height
     */
    int i, k = 0;
    double rt_step, invavg;
    /* 1-based Vcele indices of the virtual boundary cell and the
     * precipitation cell (last two entries of the Vcele array) */
    int VIRTUAL_VOL = CD->NumVol;
    int PRCP_VOL = CD->NumVol - 1;

    /* Accumulate element (GW / UNSAT) fluxes for this hydrologic step.
     * The factor 86400 converts the PIHM flux to a per-day quantity
     * (assumes wf.* is per-second — NOTE(review): confirm units). */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
    for (i = 0; i < nelem; i++)
    {
        int j;
        for (j = 0; j < 3; j++)
        {
            if (pihm->elem[i].nabr[j] != NO_FLOW)
            {
                /* Flux for GW lateral flow */
                CD->Flux[RT_LAT_GW(i, j)].flux += 1.0 * pihm->elem[i].wf.subsurf[j] * 86400; /* Test lateral dilution */
                /* Flux for UNSAT lateral flow */
                CD->Flux[RT_LAT_UNSAT(i, j)].s_area = 1.0;
            }
        }
        /* Flux for UNSAT - GW vertical flow */
        CD->Flux[RT_RECHG_UNSAT(i)].flux += (pihm->elem[i].wf.rechg * 86400) *
            CD->Vcele[RT_UNSAT(i)].area;
        CD->Flux[RT_RECHG_GW(i)].flux += (-pihm->elem[i].wf.rechg * 86400) *
            CD->Vcele[RT_GW(i)].area;
    }
    /* Flux for RIVER flow: accumulate outlet discharge (rivflow[1] at the
     * most-downstream segments) for the daily average below */
    for (i = 0; i < nriver; i++)
    {
        if (pihm->river[i].down < 0)
        {
            CD->riv += pihm->river[i].wf.rivflow[1] * 86400;
        }
    }
    if ((t - pihm->ctrl.starttime / 60) % 1440 == 0)
    {
        CD->rivd = CD->riv / 1440;  /* Averaging the sum of 1440 mins for a
                                     * daily discharge, rivFlx1 */
        CD->riv = 0;
    }
    /* Accumulate river-cell fluxes (surface, aquifer exchange, up/down
     * stream).  The second upstream branch (tributary), if present, is
     * tracked separately in flux_trib. */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
    for (i = 0; i < nriver; i++)
    {
        CD->Flux[RT_LEFT_SURF2RIVER(i)].flux += pihm->river[i].wf.rivflow[2] * 86400;
        CD->Flux[RT_RIGHT_SURF2RIVER(i)].flux += pihm->river[i].wf.rivflow[3] * 86400;
        CD->Flux[RT_LEFT_AQIF2RIVER(i)].flux += pihm->river[i].wf.rivflow[7] * 86400 +
            pihm->river[i].wf.rivflow[4] * 86400;
        CD->Flux[RT_RIGHT_AQIF2RIVER(i)].flux += pihm->river[i].wf.rivflow[8] * 86400 +
            pihm->river[i].wf.rivflow[5] * 86400;
        CD->Flux[RT_DOWN_RIVER2RIVER(i)].flux += pihm->river[i].wf.rivflow[9] * 86400 +
            pihm->river[i].wf.rivflow[1] * 86400;
        CD->Flux[RT_UP_RIVER2RIVER(i)].flux += pihm->river[i].wf.rivflow[10] * 86400 +
            pihm->river[i].wf.rivflow[0] * 86400;
        if (CD->Flux[RT_UP_RIVER2RIVER(i)].node_trib > 0)
        {
            CD->Flux[RT_UP_RIVER2RIVER(i)].flux_trib +=
                -(pihm->river[pihm->river[i].up[1] - 1].wf.rivflow[9] * 86400 +
                pihm->river[pihm->river[i].up[1] - 1].wf.rivflow[1] * 86400);
        }
    }
    /*
     * Update the cell volumetrics every averaging cycle
     */
    if (t - pihm->ctrl.starttime / 60 - (int)CD->TimLst == CD->AvgScl * stepsize)
    {
        /* Update the concentration in precipitation here. */
        if (CD->PrpFlg == 2)
        {
            IntrplForc(&CD->TSD_prepconc[0], t * 60, CD->TSD_prepconc[0].nspec,
                NO_INTRPL);
#if defined(_OPENMP)
# pragma omp parallel for
#endif
            for (i = 0; i < CD->TSD_prepconc[0].nspec; i++)
            {
                if (CD->prepconcindex[i] > 0)
                {
                    int ind;
                    ind = CD->prepconcindex[i] - 1;
                    if (CD->Precipitation.t_conc[ind] !=
                        CD->TSD_prepconc[0].value[i])
                    {
                        CD->Precipitation.t_conc[ind] =
                            CD->TSD_prepconc[0].value[i];
                        fprintf(stderr,
                            "  %s in precipitation is changed to %6.4g\n",
                            CD->chemtype[ind].ChemName,
                            CD->Precipitation.t_conc[ind]);
                    }
                }
            }
        }
        /* BUGFIX: invavg was previously read (in the height_sp updates
         * below) before its only assignment, i.e. an uninitialized read.
         * Assign it here, before first use. */
        invavg = stepsize / (double)CD->AvgScl;
#ifdef _OPENMP
# pragma omp parallel for
#endif
        for (i = 0; i < nelem; i++)
        {
            /* Roll the GW storage forward; floor at 1.0E-5 to avoid
             * zero-height (and hence zero-volume) cells */
            CD->Vcele[RT_GW(i)].height_o = CD->Vcele[RT_GW(i)].height_t;
            CD->Vcele[RT_GW(i)].height_t = MAX(pihm->elem[i].ws.gw, 1.0E-5);
            CD->Vcele[RT_GW(i)].height_int = CD->Vcele[RT_GW(i)].height_t;
            CD->Vcele[RT_GW(i)].height_sp =
                (CD->Vcele[RT_GW(i)].height_t - CD->Vcele[RT_GW(i)].height_o) * invavg;
            CD->Vcele[RT_GW(i)].vol_o =
                CD->Vcele[RT_GW(i)].area * CD->Vcele[RT_GW(i)].height_o;
            CD->Vcele[RT_GW(i)].vol =
                CD->Vcele[RT_GW(i)].area * CD->Vcele[RT_GW(i)].height_t;
            /* Update the unsaturated zone (vadoze).  PIHM effective
             * porosity is converted to RT total porosity, as in the
             * initialization. */
            CD->Vcele[RT_UNSAT(i)].height_o = CD->Vcele[RT_UNSAT(i)].height_t;
            CD->Vcele[RT_UNSAT(i)].height_t =
                MAX(((pihm->elem[i].ws.unsat * (pihm->elem[i].soil.smcmax -
                pihm->elem[i].soil.smcmin) +
                (pihm->elem[i].soil.depth - CD->Vcele[RT_GW(i)].height_t) *
                pihm->elem[i].soil.smcmin) / pihm->elem[i].soil.smcmax),
                1.0E-5);
            CD->Vcele[RT_UNSAT(i)].height_int = CD->Vcele[RT_UNSAT(i)].height_t;
            CD->Vcele[RT_UNSAT(i)].height_sp =
                (CD->Vcele[RT_UNSAT(i)].height_t - CD->Vcele[RT_UNSAT(i)].height_o) * invavg;
            CD->Vcele[RT_UNSAT(i)].vol_o = CD->Vcele[RT_UNSAT(i)].area * CD->Vcele[RT_UNSAT(i)].height_o;
            CD->Vcele[RT_UNSAT(i)].vol = CD->Vcele[RT_UNSAT(i)].area * CD->Vcele[RT_UNSAT(i)].height_t;
            CD->Vcele[RT_UNSAT(i)].sat = CD->Vcele[RT_UNSAT(i)].height_t /
                (pihm->elem[i].soil.depth - CD->Vcele[RT_GW(i)].height_t);
        }
#ifdef _OPENMP
#pragma omp parallel for
#endif
        /* Update river cells */
        for (i = 0; i < nriver; i++)
        {
            CD->Vcele[RT_RIVER(i)].height_o = CD->Vcele[RT_RIVER(i)].height_t;
            CD->Vcele[RT_RIVER(i)].height_t = MAX(pihm->river[i].ws.gw, 1.0E-5) +
                MAX(pihm->river[i].ws.stage, 1.0E-5) / CD->Vcele[RT_RIVER(i)].porosity;
            CD->Vcele[RT_RIVER(i)].height_int = CD->Vcele[RT_RIVER(i)].height_t;
            CD->Vcele[RT_RIVER(i)].height_sp =
                (CD->Vcele[RT_RIVER(i)].height_t - CD->Vcele[RT_RIVER(i)].height_o) * invavg;
            CD->Vcele[RT_RIVER(i)].area = pihm->river[i].topo.area;
            CD->Vcele[RT_RIVER(i)].vol_o = CD->Vcele[RT_RIVER(i)].area * CD->Vcele[RT_RIVER(i)].height_o;
            CD->Vcele[RT_RIVER(i)].vol = CD->Vcele[RT_RIVER(i)].area * CD->Vcele[RT_RIVER(i)].height_t;
        }
        /* Convert accumulated fluxes to averages over the cycle
         * (invavg was assigned above, before its first use) */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (k = 0; k < CD->NumFac; k++)
        {
            CD->Flux[k].flux *= invavg;
        }
        /*
         * Correct recharge and infiltration to converve mass balance
         */
        Monitor(stepsize * (double)CD->AvgScl, pihm, CD);
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (i = 0; i < nelem; i++)
        {
            int j;
            /* For gw cells, contact area is needed for dispersion; use the
             * upwind cell's mean height over the cycle times edge length */
            for (j = 0; j < 3; j++)
            {
                double h1, h2;
                if (CD->Flux[RT_LAT_GW(i, j)].BC == NO_FLOW)
                {
                    continue;
                }
                if (pihm->elem[i].nabr[j] > 0)
                {
                    /* Element neighbor */
                    h1 = 0.5 *
                        (CD->Vcele[RT_GW(i)].height_o +
                        CD->Vcele[RT_GW(i)].height_t);
                    h2 = 0.5 *
                        (CD->Vcele[RT_GW(pihm->elem[i].nabr[j] - 1)].height_o +
                        CD->Vcele[RT_GW(pihm->elem[i].nabr[j] - 1)].height_t);
                    CD->Flux[RT_LAT_GW(i, j)].s_area =
                        (CD->Flux[RT_LAT_GW(i, j)].flux > 0.0) ?
                        pihm->elem[i].topo.edge[j] * h1 :
                        pihm->elem[i].topo.edge[j] * h2;
                }
                else if (pihm->elem[i].nabr[j] < 0)
                {
                    /* River neighbor (negative index) */
                    h1 = 0.5 *
                        (CD->Vcele[RT_GW(i)].height_o +
                        CD->Vcele[RT_GW(i)].height_t);
                    h2 = 0.5 *
                        (CD->Vcele[RT_RIVER(-pihm->elem[i].nabr[j] - 1)].height_o +
                        CD->Vcele[RT_RIVER(-pihm->elem[i].nabr[j] - 1)].height_t);
                    CD->Flux[RT_LAT_GW(i, j)].s_area =
                        (CD->Flux[RT_LAT_GW(i, j)].flux > 0.0) ?
                        pihm->elem[i].topo.edge[j] * h1 :
                        pihm->elem[i].topo.edge[j] * h2;
                }
                /* Calculate velocity according to flux and area; fall back
                 * to a tiny velocity when the contact area is negligible */
                CD->Flux[RT_LAT_GW(i, j)].velocity =
                    (CD->Flux[RT_LAT_GW(i, j)].s_area > 1.0E-4) ?
                    CD->Flux[RT_LAT_GW(i, j)].flux /
                    CD->Flux[RT_LAT_GW(i, j)].s_area :
                    1.0E-10;
            }
            /* Vertical recharge faces use the full element area */
            CD->Flux[RT_RECHG_UNSAT(i)].s_area = pihm->elem[i].topo.area;
            CD->Flux[RT_RECHG_UNSAT(i)].velocity =
                CD->Flux[RT_RECHG_UNSAT(i)].flux / pihm->elem[i].topo.area;
            CD->Flux[RT_RECHG_GW(i)].s_area = pihm->elem[i].topo.area;
            CD->Flux[RT_RECHG_GW(i)].velocity =
                CD->Flux[RT_RECHG_GW(i)].flux / pihm->elem[i].topo.area;
        }
        /* Correct river flux area and velocity: mirror them from the
         * adjacent element's lateral GW face (sign flipped) */
#ifdef _OPENMP
# pragma omp parallel for
#endif
        for (i = 0; i < nriver; i++)
        {
            int j;
            for (j = 0; j < NUM_EDGE; j++)
            {
                if (-pihm->elem[pihm->river[i].leftele - 1].nabr[j] == i + 1)
                {
                    CD->Flux[RT_LEFT_AQIF2RIVER(i)].s_area =
                        CD->Flux[RT_LAT_GW(pihm->river[i].leftele - 1, j)].s_area;
                    CD->Flux[RT_LEFT_AQIF2RIVER(i)].velocity =
                        -CD->Flux[RT_LAT_GW(pihm->river[i].leftele - 1, j)].velocity;
                    break;
                }
            }
            for (j = 0; j < NUM_EDGE; j++)
            {
                if (-pihm->elem[pihm->river[i].rightele - 1].nabr[j] == i + 1)
                {
                    CD->Flux[RT_RIGHT_AQIF2RIVER(i)].s_area =
                        CD->Flux[RT_LAT_GW(pihm->river[i].rightele - 1, j)].s_area;
                    CD->Flux[RT_RIGHT_AQIF2RIVER(i)].velocity =
                        -CD->Flux[RT_LAT_GW(pihm->river[i].rightele - 1, j)].velocity;
                    break;
                }
            }
        }
        /* Update virtual volume: refresh the precipitation cell's
         * concentrations (scaled by condensation, and by the DOC
         * calibration factor where applicable) */
        if (CD->PrpFlg)
        {
#if defined(_OPENMP)
# pragma omp parallel for
#endif
            for (k = 0; k < CD->NumSpc; k++)
            {
                CD->Vcele[PRCP_VOL - 1].t_conc[k] =
                    (strcmp(CD->chemtype[k].ChemName, "'DOC'") == 0) ?
                    CD->Precipitation.t_conc[k] * CD->Condensation *
                    CD->CalPrcpconc :
                    CD->Precipitation.t_conc[k] * CD->Condensation;
            }
        }
        else
        {
#if defined(_OPENMP)
# pragma omp parallel for
#endif
            for (k = 0; k < CD->NumSpc; k++)
            {
                CD->Vcele[PRCP_VOL - 1].t_conc[k] = 0.0;
            }
        }
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (k = 0; k < CD->NumStc; k++)
        {
            CD->Vcele[VIRTUAL_VOL - 1].t_conc[k] =
                CD->Precipitation.t_conc[k] * CD->Condensation;
            CD->Vcele[VIRTUAL_VOL - 1].p_conc[k] =
                CD->Precipitation.t_conc[k] * CD->Condensation;
        }
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (i = 0; i < CD->NumVol; i++)
        {
            CD->Vcele[i].rt_step = 0.0;
        }
        /* Accumulate a CFL/Peclet-style stability measure per upwind cell.
         * NOTE(review): peclet is computed in the species loop but only the
         * last species' value survives to the accumulation below — verify
         * against upstream PIHM-RT whether the accumulation belongs inside
         * the species loop. Behavior preserved as-is here. */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (i = 0; i < CD->NumDis; i++)
        {
            int j;
            double peclet;
            if (CD->Flux[i].BC != NO_DISP)
            {
                for (j = 0; j < CD->NumSpc; j++)
                {
                    peclet = fabs(CD->Flux[i].velocity * CD->Flux[i].distance /
                        (CD->chemtype[j].DiffCoe +
                        CD->chemtype[j].DispCoe * CD->Flux[i].velocity));
                    peclet = MAX(peclet, 1.0E-8);
                }
                CD->Vcele[CD->Flux[i].nodeup - 1].rt_step +=
                    fabs(CD->Flux[i].flux / CD->Vcele[CD->Flux[i].nodeup - 1].vol) *
                    (1 + peclet) / peclet;
            }
        }
        /* Invert into a time step and cap at the averaging interval */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (i = 0; i < CD->NumOsv; i++)
        {
            CD->Vcele[i].rt_step = 0.6 * UNIT_C / CD->Vcele[i].rt_step;
            CD->Vcele[i].rt_step =
                (CD->Vcele[i].rt_step >= (double)CD->AvgScl) ?
                (double)CD->AvgScl : CD->Vcele[i].rt_step;
        }
        /*
         * RT step control begins
         */
        if (CD->TimLst >= CD->Delay)
        {
            rt_step = stepsize * (double)CD->AvgScl;
            AdptTime(CD, CD->TimLst, rt_step, stepsize * (double)CD->AvgScl,
                t_duration_transp, t_duration_react);
#if defined(_OPENMP)
# pragma omp parallel for
#endif
            for (i = 0; i < CD->NumEle; i++)
            {
                int j;
                for (j = 0; j < CD->NumStc; j++)
                {
                    if (CD->chemtype[j].itype == MINERAL)
                    {
                        /* Averaging mineral concentration to ensure mass
                         * conservation !! */
                        CD->Vcele[RT_GW(i)].t_conc[j] =
                            (CD->Vcele[RT_GW(i)].t_conc[j] *
                            CD->Vcele[RT_GW(i)].height_t +
                            CD->Vcele[RT_UNSAT(i)].t_conc[j] *
                            (pihm->elem[i].soil.depth -
                            CD->Vcele[RT_GW(i)].height_t)) /
                            pihm->elem[i].soil.depth;
                        CD->Vcele[RT_UNSAT(i)].t_conc[j] =
                            CD->Vcele[RT_GW(i)].t_conc[j];
                        CD->Vcele[RT_GW(i)].p_conc[j] =
                            CD->Vcele[RT_GW(i)].t_conc[j];
                        CD->Vcele[RT_UNSAT(i)].p_conc[j] =
                            CD->Vcele[RT_GW(i)].t_conc[j];
                    }
                }
            }
#if defined(_OPENMP)
# pragma omp parallel for
#endif
            for (i = 0; i < CD->NumOsv; i++)
            {
                int j;
                /* Make sure intrapolation worked well */
                if (fabs(CD->Vcele[i].height_t - CD->Vcele[i].height_int) >
                    1.0E-6)
                    fprintf(stderr, "%d %6.4f\t%6.4f\n", i,
                        CD->Vcele[i].height_t, CD->Vcele[i].height_int);
                assert(fabs(CD->Vcele[i].height_t - CD->Vcele[i].height_int) <
                    1.0E-6);
                if (CD->Vcele[i].illness >= 20)
                {
                    for (j = 0; j < CD->NumStc; j++)
                        CD->Vcele[i].t_conc[j] = 1.0E-10;
                    fprintf(stderr,
                        " Cell %d isolated due to proneness to err!\n",
                        CD->Vcele[i].index);
                }
            }
        }   /* RT step control ends */
        CD->TimLst = t - pihm->ctrl.starttime / 60;
        /* Reset fluxes for next averaging stage */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (k = 0; k < CD->NumDis; k++)
        {
            CD->Flux[k].velocity = 0.0;
            CD->Flux[k].flux = 0.0;
            CD->Flux[k].flux_trib = 0.0;
            /* For riv cells, contact area is not needed */
            CD->Flux[k].s_area = 0.0;
        }
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (k = 0; k < CD->NumFac; k++)
        {
            CD->Flux[k].flux = 0.0;
            CD->Flux[k].flux_trib = 0.0;
            CD->Flux[k].velocity = 0.0;
        }
        /* Hourly: re-speciate river (or all) cells */
        if ((t - pihm->ctrl.starttime / 60) % 60 == 0)
        {
            CD->SPCFlg = 0;
            if (!CD->RecFlg)
            {
#if defined(_OPENMP)
# pragma omp parallel for
#endif
                for (i = 0; i < CD->NumStc; i++)
                {
                    int j;
                    for (j = 0; j < nriver; j++)
                    {
                        CD->Vcele[RT_RIVER(j)].p_conc[i] =
                            (CD->chemtype[i].itype == MINERAL) ?
                            CD->Vcele[RT_RIVER(j)].t_conc[i] :
                            fabs(CD->Vcele[RT_RIVER(j)].t_conc[i] * 0.1);
                    }
                }
            }
            if (!CD->RecFlg)
            {
#ifdef _OPENMP
#pragma omp parallel for
#endif
                for (i = 0; i < nriver; i++)
                {
                    Speciation(CD, RT_RIVER(i));
                }
            }
            else
            {
#ifdef _OPENMP
#pragma omp parallel for
#endif
                for (i = 0; i < CD->NumOsv; i++)
                    Speciation(CD, i);
            }
        }
        /* Cache log10 concentrations for output */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (i = 0; i < CD->NumVol; i++)
        {
            int j;
            for (j = 0; j < CD->NumStc; j++)
            {
                CD->Vcele[i].log10_pconc[j] = log10(CD->Vcele[i].p_conc[j]);
            }
            for (j = 0; j < CD->NumSsc; j++)
            {
                CD->Vcele[i].log10_sconc[j] = log10(CD->Vcele[i].s_conc[j]);
            }
        }
        /* Breakthrough-curve output: mix pump injection into the pumped
         * species at BTC locations downstream of pump 0 */
        for (k = 0; k < CD->NumBTC; k++)
        {
            int j;
            for (j = 0; j < CD->NumStc; j++)
            {
                if ((CD->BTC_loc[k] >= CD->pumps[0].Pump_Location - 1) &&
                    (j == CD->pumps[0].Position_Species))
                {
                    CD->Vcele[CD->BTC_loc[k]].btcv_pconc[j] =
                        log10((CD->Vcele[CD->BTC_loc[k]].p_conc[j] * CD->rivd +
                        CD->pumps[0].Injection_conc * CD->pumps[0].flow_rate) /
                        (CD->rivd + CD->pumps[0].flow_rate));
                }
                else
                {
                    CD->Vcele[CD->BTC_loc[k]].btcv_pconc[j] =
                        CD->Vcele[CD->BTC_loc[k]].log10_pconc[j];
                }
            }
        }
    }
}
/* Advance the reactive-transport (RT) solution over one hydrologic interval.
 *
 *   CD                 reaction/transport data structure (updated in place)
 *   timelps            elapsed simulation time at entry [min]
 *   rt_step            maximum RT sub-step size [min]
 *   hydro_step         length of the hydrologic interval to cover [min]
 *   t_duration_transp  accumulates wall-clock seconds spent in transport
 *   t_duration_react   accumulates wall-clock seconds spent in reaction
 *
 * The interval is covered with sub-steps of at most rt_step; when
 * rt_step < hydro_step the cell heights are linearly interpolated over each
 * sub-step (int_flg).  For every sub-step the adsorbed (non-aqueous
 * secondary) contribution is stripped from the mobile totals before OS3D()
 * transports the aqueous part, then added back afterwards, and React() is
 * invoked at the configured React_delay cadence.
 *
 * Fixes vs. the original:
 *  - the inner index 'k' was a function-scope variable used inside two
 *    "omp parallel for" regions; it is shared by default under OpenMP,
 *    which is a data race.  It is now declared inside each parallel loop.
 *  - the transport timer start was never reset between sub-steps, so each
 *    iteration re-added the time of all previous iterations; the start is
 *    now advanced after each accumulation.
 */
void AdptTime(Chem_Data CD, realtype timelps, double rt_step, double hydro_step,
    double *t_duration_transp, double *t_duration_react)
{
    double          stepsize, end_time;
    int             i, int_flg;
    time_t          t_start_transp, t_end_transp;

    stepsize = rt_step;
    end_time = timelps + hydro_step;
    t_start_transp = time(NULL);

    /* Simple check to determine whether or not to intrapolate the gw height */
    if (rt_step >= hydro_step)
    {
        int_flg = 0;
    }
    else
    {
        int_flg = 1;
        fprintf(stderr, " Sub time step intrapolation performed. \n");
    }

    while (timelps < end_time)
    {
        time_t          t_start_react, t_end_react;

        /* Clip the final sub-step so we land exactly on end_time */
        stepsize = (stepsize > end_time - timelps) ?
            end_time - timelps : stepsize;

        if (int_flg)
        {
            /* Do interpolation. Note that height_int always store the end
             * time height. */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
            for (i = 0; i < CD->NumOsv; i++)
            {
                CD->Vcele[i].height_t =
                    CD->Vcele[i].height_o + CD->Vcele[i].height_sp * stepsize;
                CD->Vcele[i].vol = CD->Vcele[i].area * CD->Vcele[i].height_t;
            }
        }

        /* Remove the adsorbed (non-aqueous secondary) contribution from the
         * mobile totals so OS3D() only transports the aqueous part */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (i = 0; i < nelem; i++)
        {
            int j, k;       /* thread-private loop indices (race fix) */

            for (j = 0; j < CD->NumSpc; j++)
            {
                if (CD->chemtype[j].mtype == 2)
                {
                    for (k = 0; k < CD->NumSsc; k++)
                    {
                        if ((CD->Totalconc[j][k + CD->NumStc] != 0) &&
                            (CD->chemtype[k + CD->NumStc].itype != AQUEOUS))
                        {
                            CD->Vcele[RT_GW(i)].t_conc[j] =
                                CD->Vcele[RT_GW(i)].t_conc[j] -
                                CD->Totalconc[j][k + CD->NumStc] *
                                CD->Vcele[RT_GW(i)].s_conc[k] * CD->TimRiv;
                            CD->Vcele[RT_UNSAT(i)].t_conc[j] =
                                CD->Vcele[RT_UNSAT(i)].t_conc[j] -
                                CD->Totalconc[j][k + CD->NumStc] *
                                CD->Vcele[RT_UNSAT(i)].s_conc[k] * CD->TimRiv;
                        }
                    }
                }
            }
        }

        OS3D(timelps, stepsize, CD);

        /* Total concentration except for adsorptions have been transported and
         * adjusted by the volume. For example, if no transport but volume
         * increased by rain, the concentration need be decreased. However, the
         * adsorption part has not been treated yet, so they need be adjusted by
         * the volume change.
         * The porosity is not changed during the period, so the ratio between
         * pore space before and after OS3D is the same ratio between volume of
         * porous media before and after OS3D. */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
        for (i = 0; i < nelem; i++)
        {
            int j, k;       /* thread-private loop indices (race fix) */

            for (j = 0; j < CD->NumSpc; j++)
            {
                if (CD->chemtype[j].mtype == 2)
                {
                    for (k = 0; k < CD->NumSsc; k++)
                    {
                        if ((CD->Totalconc[j][k + CD->NumStc] != 0) &&
                            (CD->chemtype[k + CD->NumStc].itype != AQUEOUS))
                        {
                            CD->Vcele[RT_GW(i)].t_conc[j] =
                                CD->Vcele[RT_GW(i)].t_conc[j] +
                                CD->Totalconc[j][k + CD->NumStc] *
                                CD->Vcele[RT_GW(i)].s_conc[k] * CD->TimRiv;
                            CD->Vcele[RT_UNSAT(i)].t_conc[j] =
                                CD->Vcele[RT_UNSAT(i)].t_conc[j] +
                                CD->Totalconc[j][k + CD->NumStc] *
                                CD->Vcele[RT_UNSAT(i)].s_conc[k] * CD->TimRiv;
                        }
                    }
                }
            }
        }

        t_end_transp = time(NULL);
        *t_duration_transp += (t_end_transp - t_start_transp);
        /* Restart the transport timer for the next sub-step; the original
         * kept the initial start time and double-counted earlier steps */
        t_start_transp = t_end_transp;

        t_start_react = time(NULL);

        if (int_flg)
        {
            /* Roll the interpolated heights forward as the new base state */
#if defined(_OPENMP)
# pragma omp parallel for
#endif
            for (i = 0; i < CD->NumVol; i++)
            {
                CD->Vcele[i].height_o = CD->Vcele[i].height_t;
                CD->Vcele[i].vol_o = CD->Vcele[i].area * CD->Vcele[i].height_o;
            }
        }

        /* NOTE(review): assumes (int)(React_delay * stepsize) >= 1; a zero
         * divisor here would be undefined behavior -- confirm configuration */
        if ((!CD->RecFlg) && ((int)(timelps + stepsize) %
            (int)(CD->React_delay * stepsize) == 0))
        {
#ifdef _OPENMP
# pragma omp parallel for
#endif
            for (i = 0; i < nelem; i++)
            {
                double z_SOC;

                /* thickness above the saturated+unsaturated column, clamped
                 * at zero */
                z_SOC = CD->Vcele[RT_GW(i)].maxwater -
                    (CD->Vcele[RT_GW(i)].height_t +
                    CD->Vcele[RT_UNSAT(i)].height_t);
                z_SOC = (z_SOC > 0.0) ? z_SOC : 0.0;

                React(stepsize, CD, &CD->Vcele[RT_GW(i)], z_SOC);
                React(stepsize, CD, &CD->Vcele[RT_UNSAT(i)], z_SOC);
            }
        }

        timelps += stepsize;
        if (timelps >= end_time)
        {
            t_end_react = time(NULL);
            *t_duration_react += (t_end_react - t_start_react);
            break;
        }
    }

    if ((!CD->RecFlg) &&
        ((int)(timelps) % (int)(CD->React_delay * stepsize) == 0))
    {
        /* Do nothing. Place holder for test purposes. */
    }
}
/* Release every dynamically allocated member of the Chem_Data structure.
 * Row arrays are freed before the pointer arrays that hold them; the order
 * of the independent free() groups mirrors the structure layout. */
void FreeChem(Chem_Data CD)
{
    int n;

    free(CD->BTC_loc);
    free(CD->prepconcindex);

    /* dependency tables */
    for (n = 0; n < CD->NumSsc; n++)
    {
        free(CD->Dependency[n]);
    }
    free(CD->Dependency);

    for (n = 0; n < CD->NumMkr + CD->NumAkr; n++)
    {
        free(CD->Dep_kinetic[n]);
    }
    free(CD->Dep_kinetic);

    for (n = 0; n < CD->NumMin; n++)
    {
        free(CD->Dep_kinetic_all[n]);
    }
    free(CD->Dep_kinetic_all);

    /* total-concentration tables */
    for (n = 0; n < CD->NumStc; n++)
    {
        free(CD->Totalconc[n]);
#if NOT_YET_IMPLEMENTED
        free(CD->Totalconck[n]);
#endif
    }
    free(CD->Totalconc);
#if NOT_YET_IMPLEMENTED
    free(CD->Totalconck);
#endif

    free(CD->kinetics);
    free(CD->Keq);
    free(CD->KeqKinect);
    free(CD->KeqKinect_all);

    /* volume cells and their per-species arrays */
    for (n = 0; n < CD->NumVol; n++)
    {
        free(CD->Vcele[n].t_conc);
        free(CD->Vcele[n].p_conc);
        free(CD->Vcele[n].s_conc);
        free(CD->Vcele[n].log10_pconc);
        free(CD->Vcele[n].log10_sconc);
        free(CD->Vcele[n].p_actv);
        free(CD->Vcele[n].p_para);
        free(CD->Vcele[n].p_type);
        free(CD->Vcele[n].btcv_pconc);
    }
    free(CD->Vcele);

    free(CD->Flux);

    /* species names */
    for (n = 0; n < CD->NumStc + CD->NumSsc; n++)
    {
        free(CD->chemtype[n].ChemName);
    }
    free(CD->chemtype);

    /* pumps: CD->pumps is only released when pumps were configured */
    if (CD->NumPUMP > 0)
    {
        for (n = 0; n < CD->NumPUMP; n++)
        {
            free(CD->pumps[n].Name_Species);
        }
        free(CD->pumps);
    }

    /* precipitation-concentration time series */
    for (n = 0; n < CD->TSD_prepconc[0].length; n++)
    {
        free(CD->TSD_prepconc[0].data[n]);
    }
    free(CD->TSD_prepconc[0].data);
    free(CD->TSD_prepconc[0].ftime);
    free(CD->TSD_prepconc[0].value);
    free(CD->TSD_prepconc);

    free(CD->Precipitation.p_type);
    free(CD->Precipitation.t_conc);
    free(CD->Precipitation.p_conc);
    free(CD->Precipitation.p_para);
}
/* Perpendicular distance from the centroid of 'elem' to its edge 'edge_j'.
 * The edge is the line through the two nodes other than corner edge_j;
 * node numbers in elem->node are 1-based, hence the "- 1". */
double Dist2Edge(const meshtbl_struct *meshtbl, const elem_struct *elem,
    int edge_j)
{
    double          ea, eb, ec;
    double          xa, ya, xb, yb;
    int             na, nb;

    na = elem->node[(edge_j + 1) % 3] - 1;
    nb = elem->node[(edge_j + 2) % 3] - 1;

    xa = meshtbl->x[na];
    ya = meshtbl->y[na];
    xb = meshtbl->x[nb];
    yb = meshtbl->y[nb];

    /* Edge line written as ea * x + eb * y + ec = 0 */
    ea = yb - ya;
    eb = xa - xb;
    ec = (xb - xa) * ya - (yb - ya) * xa;

    /* Standard point-to-line distance |ea*x0 + eb*y0 + ec| / ||(ea,eb)|| */
    return fabs(ea * elem->topo.x + eb * elem->topo.y + ec) /
        sqrt(ea * ea + eb * eb);
}
|
sstruct_matrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_SStructPMatrix class.
*
*****************************************************************************/
#include "_hypre_sstruct_mv.h"
#include "_hypre_struct_mv.hpp"
/*==========================================================================
* SStructPMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Take an additional reference to 'matrix': bump its reference count and
 * return the same pointer through 'matrix_ref'.  Paired with
 * hypre_SStructPMatrixDestroy, which drops a reference.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixRef( hypre_SStructPMatrix  *matrix,
                         hypre_SStructPMatrix **matrix_ref )
{
   *matrix_ref = matrix;
   hypre_SStructPMatrixRefCount(matrix)++;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Create an nvars x nvars block matrix over one part.  For each variable
 * pair (vi, vj), the entries of stencils[vi] that couple to variable vj
 * are split out into a per-pair struct stencil, and a struct matrix is
 * created for every non-empty coupling.  smaps[vi][i] records where entry
 * i of the original sstruct stencil landed inside its per-pair stencil.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixCreate( MPI_Comm               comm,
                            hypre_SStructPGrid    *pgrid,
                            hypre_SStructStencil **stencils,
                            hypre_SStructPMatrix **pmatrix_ptr )
{
   hypre_SStructPMatrix  *pmatrix;
   HYPRE_Int              nvars;
   HYPRE_Int            **smaps;
   hypre_StructStencil ***sstencils;
   hypre_StructMatrix  ***smatrices;
   HYPRE_Int            **symmetric;

   hypre_StructStencil   *sstencil;
   HYPRE_Int             *vars;
   hypre_Index           *sstencil_shape;
   HYPRE_Int              sstencil_size;
   HYPRE_Int              new_dim;
   HYPRE_Int             *new_sizes;
   hypre_Index          **new_shapes;
   HYPRE_Int              size;
   hypre_StructGrid      *sgrid;
   HYPRE_Int              vi, vj;
   HYPRE_Int              i, j, k;

   pmatrix = hypre_TAlloc(hypre_SStructPMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_SStructPMatrixComm(pmatrix)     = comm;
   hypre_SStructPMatrixPGrid(pmatrix)    = pgrid;
   hypre_SStructPMatrixStencils(pmatrix) = stencils;
   nvars = hypre_SStructPGridNVars(pgrid);
   hypre_SStructPMatrixNVars(pmatrix) = nvars;

   /* create sstencils */
   smaps      = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST);
   sstencils  = hypre_TAlloc(hypre_StructStencil **, nvars, HYPRE_MEMORY_HOST);
   /* scratch arrays, reused for every vi and freed below */
   new_sizes  = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST);
   new_shapes = hypre_TAlloc(hypre_Index *, nvars, HYPRE_MEMORY_HOST);
   size = 0;
   for (vi = 0; vi < nvars; vi++)
   {
      sstencils[vi] = hypre_TAlloc(hypre_StructStencil *, nvars, HYPRE_MEMORY_HOST);
      for (vj = 0; vj < nvars; vj++)
      {
         sstencils[vi][vj] = NULL;
         new_sizes[vj] = 0;
      }

      sstencil       = hypre_SStructStencilSStencil(stencils[vi]);
      vars           = hypre_SStructStencilVars(stencils[vi]);
      sstencil_shape = hypre_StructStencilShape(sstencil);
      sstencil_size  = hypre_StructStencilSize(sstencil);

      smaps[vi] = hypre_TAlloc(HYPRE_Int, sstencil_size, HYPRE_MEMORY_HOST);

      /* first pass: count how many entries couple to each to-variable */
      for (i = 0; i < sstencil_size; i++)
      {
         j = vars[i];
         new_sizes[j]++;
      }

      /* allocate shape arrays; new_sizes is reset to count again below */
      for (vj = 0; vj < nvars; vj++)
      {
         if (new_sizes[vj])
         {
            new_shapes[vj] = hypre_TAlloc(hypre_Index, new_sizes[vj], HYPRE_MEMORY_HOST);
            new_sizes[vj] = 0;
         }
      }

      /* second pass: scatter offsets into the per-pair shape arrays and
       * record the entry mapping in smaps[vi] */
      for (i = 0; i < sstencil_size; i++)
      {
         j = vars[i];
         k = new_sizes[j];
         hypre_CopyIndex(sstencil_shape[i], new_shapes[j][k]);
         smaps[vi][i] = k;
         new_sizes[j]++;
      }

      new_dim = hypre_StructStencilNDim(sstencil);
      for (vj = 0; vj < nvars; vj++)
      {
         if (new_sizes[vj])
         {
            /* new_shapes[vj] is not freed here -- presumably
             * hypre_StructStencilCreate takes ownership of it; only the
             * outer pointer array is freed below.  TODO confirm. */
            sstencils[vi][vj] =
               hypre_StructStencilCreate(new_dim, new_sizes[vj], new_shapes[vj]);
         }
         /* track the largest per-pair stencil for the sentries scratch */
         size = hypre_max(size, new_sizes[vj]);
      }
   }
   hypre_SStructPMatrixSMaps(pmatrix)     = smaps;
   hypre_SStructPMatrixSStencils(pmatrix) = sstencils;
   hypre_TFree(new_sizes, HYPRE_MEMORY_HOST);
   hypre_TFree(new_shapes, HYPRE_MEMORY_HOST);

   /* create smatrices */
   smatrices = hypre_TAlloc(hypre_StructMatrix **, nvars, HYPRE_MEMORY_HOST);
   for (vi = 0; vi < nvars; vi++)
   {
      smatrices[vi] = hypre_TAlloc(hypre_StructMatrix *, nvars, HYPRE_MEMORY_HOST);
      for (vj = 0; vj < nvars; vj++)
      {
         smatrices[vi][vj] = NULL;
         if (sstencils[vi][vj] != NULL)
         {
            sgrid = hypre_SStructPGridSGrid(pgrid, vi);
            smatrices[vi][vj] =
               hypre_StructMatrixCreate(comm, sgrid, sstencils[vi][vj]);
         }
      }
   }
   hypre_SStructPMatrixSMatrices(pmatrix) = smatrices;

   /* create symmetric: all couplings start out non-symmetric */
   symmetric = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST);
   for (vi = 0; vi < nvars; vi++)
   {
      symmetric[vi] = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST);
      for (vj = 0; vj < nvars; vj++)
      {
         symmetric[vi][vj] = 0;
      }
   }
   hypre_SStructPMatrixSymmetric(pmatrix) = symmetric;

   /* scratch array for entry translation in Set/Get, sized by the widest
    * per-pair stencil computed above */
   hypre_SStructPMatrixSEntriesSize(pmatrix) = size;
   hypre_SStructPMatrixSEntries(pmatrix) = hypre_TAlloc(HYPRE_Int, size, HYPRE_MEMORY_HOST);

   hypre_SStructPMatrixRefCount(pmatrix) = 1;

   *pmatrix_ptr = pmatrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Drop one reference to 'pmatrix'.  When the count reaches zero, destroy
 * all per-variable stencils and struct matrices and free the bookkeeping
 * arrays created by hypre_SStructPMatrixCreate.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixDestroy( hypre_SStructPMatrix *pmatrix )
{
   hypre_SStructStencil **stencils;
   HYPRE_Int              nvars;
   HYPRE_Int            **smaps;
   hypre_StructStencil ***sstencils;
   hypre_StructMatrix  ***smatrices;
   HYPRE_Int            **symmetric;
   HYPRE_Int              vi, vj;

   if (pmatrix == NULL)
   {
      return hypre_error_flag;
   }

   hypre_SStructPMatrixRefCount(pmatrix)--;
   if (hypre_SStructPMatrixRefCount(pmatrix) != 0)
   {
      /* other holders remain; nothing to free yet */
      return hypre_error_flag;
   }

   stencils  = hypre_SStructPMatrixStencils(pmatrix);
   nvars     = hypre_SStructPMatrixNVars(pmatrix);
   smaps     = hypre_SStructPMatrixSMaps(pmatrix);
   sstencils = hypre_SStructPMatrixSStencils(pmatrix);
   smatrices = hypre_SStructPMatrixSMatrices(pmatrix);
   symmetric = hypre_SStructPMatrixSymmetric(pmatrix);

   /* per-variable rows first, then the outer arrays */
   for (vi = 0; vi < nvars; vi++)
   {
      HYPRE_SStructStencilDestroy(stencils[vi]);
      hypre_TFree(smaps[vi], HYPRE_MEMORY_HOST);
      for (vj = 0; vj < nvars; vj++)
      {
         hypre_StructStencilDestroy(sstencils[vi][vj]);
         hypre_StructMatrixDestroy(smatrices[vi][vj]);
      }
      hypre_TFree(sstencils[vi], HYPRE_MEMORY_HOST);
      hypre_TFree(smatrices[vi], HYPRE_MEMORY_HOST);
      hypre_TFree(symmetric[vi], HYPRE_MEMORY_HOST);
   }
   hypre_TFree(stencils, HYPRE_MEMORY_HOST);
   hypre_TFree(smaps, HYPRE_MEMORY_HOST);
   hypre_TFree(sstencils, HYPRE_MEMORY_HOST);
   hypre_TFree(smatrices, HYPRE_MEMORY_HOST);
   hypre_TFree(symmetric, HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_SStructPMatrixSEntries(pmatrix), HYPRE_MEMORY_HOST);
   hypre_TFree(pmatrix, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Initialize each underlying struct matrix: propagate the stored symmetry
 * flag, allocate its data, and clear ghost values so that AddTo
 * accumulation works correctly between processors.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixInitialize( hypre_SStructPMatrix *pmatrix )
{
   HYPRE_Int    nvars     = hypre_SStructPMatrixNVars(pmatrix);
   HYPRE_Int  **symmetric = hypre_SStructPMatrixSymmetric(pmatrix);
   HYPRE_Int    vi, vj;

   /* HYPRE_Int num_ghost[2*HYPRE_MAXDIM]; */
   /* HYPRE_Int vi, vj, d, ndim; */
#if 0
   ndim = hypre_SStructPMatrixNDim(pmatrix);
   /* RDF: Why are the ghosts being reset to one? Maybe it needs to be at least
    * one to set shared coefficients correctly, but not exactly one? */
   for (d = 0; d < ndim; d++)
   {
      num_ghost[2 * d] = num_ghost[2 * d + 1] = 1;
   }
#endif

   for (vi = 0; vi < nvars; vi++)
   {
      for (vj = 0; vj < nvars; vj++)
      {
         hypre_StructMatrix *smatrix =
            hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);

         if (smatrix == NULL)
         {
            continue;   /* no coupling between vi and vj */
         }

         HYPRE_StructMatrixSetSymmetric(smatrix, symmetric[vi][vj]);
         /* hypre_StructMatrixSetNumGhost(smatrix, num_ghost); */
         hypre_StructMatrixInitialize(smatrix);
         /* needed to get AddTo accumulation correct between processors */
         hypre_StructMatrixClearGhostValues(smatrix);
      }
   }

   hypre_SStructPMatrixAccumulated(pmatrix) = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Set/add/get stencil coefficients of the part matrix at a single grid
 * point 'index' for variable 'var'.
 * (action > 0): add-to values
 * (action = 0): set values
 * (action < 0): get values
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetValues( hypre_SStructPMatrix *pmatrix,
                               hypre_Index           index,
                               HYPRE_Int             var,
                               HYPRE_Int             nentries,
                               HYPRE_Int            *entries,
                               HYPRE_Complex        *values,
                               HYPRE_Int             action )
{
   hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var);
   HYPRE_Int            *smap    = hypre_SStructPMatrixSMap(pmatrix, var);
   HYPRE_Int            *vars    = hypre_SStructStencilVars(stencil);
   hypre_StructMatrix   *smatrix;
   hypre_BoxArray       *grid_boxes;
   hypre_Box            *box, *grow_box;
   HYPRE_Int            *sentries;
   HYPRE_Int             i;

   /* all requested entries go to the struct matrix of entries[0]'s
    * to-variable -- presumably the caller passes entries of one coupling
    * only; TODO confirm against the SStruct interface contract */
   smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]);

   /* translate sstruct stencil entry numbers into struct stencil numbers */
   sentries = hypre_SStructPMatrixSEntries(pmatrix);
   for (i = 0; i < nentries; i++)
   {
      sentries[i] = smap[entries[i]];
   }

   /* set values inside the grid */
   hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values,
                               action, -1, 0);

   /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
   if (action != 0)
   {
      /* AddTo/Get */
      hypre_SStructPGrid  *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
      hypre_Index          varoffset;
      HYPRE_Int            done = 0;

      /* if the index lies inside some grid box, the call above already
       * handled it */
      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
      hypre_ForBoxI(i, grid_boxes)
      {
         box = hypre_BoxArrayBox(grid_boxes, i);
         if (hypre_IndexInBox(index, box))
         {
            done = 1;
            break;
         }
      }

      /* otherwise look for it in the ghost layer: each grid box grown by
       * the variable offset; apply to the first box whose grown region
       * contains the index */
      if (!done)
      {
         grow_box = hypre_BoxCreate(hypre_BoxArrayNDim(grid_boxes));
         hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var),
                                        hypre_SStructPGridNDim(pgrid), varoffset);
         hypre_ForBoxI(i, grid_boxes)
         {
            box = hypre_BoxArrayBox(grid_boxes, i);
            hypre_CopyBox(box, grow_box);
            hypre_BoxGrowByIndex(grow_box, varoffset);
            if (hypre_IndexInBox(index, grow_box))
            {
               hypre_StructMatrixSetValues(smatrix, index, nentries, sentries,
                                           values, action, i, 1);
               break;
            }
         }
         hypre_BoxDestroy(grow_box);
      }
   }
   else
   {
      /* Set: clear the value in the ghost zone of every box that does not
       * own the index itself */
      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
      hypre_ForBoxI(i, grid_boxes)
      {
         box = hypre_BoxArrayBox(grid_boxes, i);
         if (!hypre_IndexInBox(index, box))
         {
            hypre_StructMatrixClearValues(smatrix, index, nentries, sentries, i, 1);
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Box version of hypre_SStructPMatrixSetValues: apply 'values' (laid out
 * over 'value_box') to the points of 'set_box'.
 * (action > 0): add-to values
 * (action = 0): set values
 * (action < 0): get values
 * (action =-2): get values and zero out
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetBoxValues( hypre_SStructPMatrix *pmatrix,
                                  hypre_Box            *set_box,
                                  HYPRE_Int             var,
                                  HYPRE_Int             nentries,
                                  HYPRE_Int            *entries,
                                  hypre_Box            *value_box,
                                  HYPRE_Complex        *values,
                                  HYPRE_Int             action )
{
   HYPRE_Int             ndim    = hypre_SStructPMatrixNDim(pmatrix);
   hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var);
   HYPRE_Int            *smap    = hypre_SStructPMatrixSMap(pmatrix, var);
   HYPRE_Int            *vars    = hypre_SStructStencilVars(stencil);
   hypre_StructMatrix   *smatrix;
   hypre_BoxArray       *grid_boxes;
   HYPRE_Int            *sentries;
   HYPRE_Int             i, j;

   /* all entries are routed to the struct matrix of entries[0]'s
    * to-variable */
   smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]);

   /* translate sstruct stencil entry numbers into struct stencil numbers */
   sentries = hypre_SStructPMatrixSEntries(pmatrix);
   for (i = 0; i < nentries; i++)
   {
      sentries[i] = smap[entries[i]];
   }

   /* set values inside the grid */
   hypre_StructMatrixSetBoxValues(smatrix, set_box, value_box, nentries, sentries,
                                  values, action, -1, 0);

   /* TODO: Why need DeviceSync? */
#if defined(HYPRE_USING_GPU)
   hypre_SyncCudaDevice(hypre_handle());
#endif

   /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
   if (action != 0)
   {
      /* AddTo/Get */
      hypre_SStructPGrid  *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
      hypre_Index          varoffset;
      hypre_BoxArray      *left_boxes, *done_boxes, *temp_boxes;
      hypre_Box           *left_box, *done_box, *int_box;

      hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var),
                                     hypre_SStructPGridNDim(pgrid), varoffset);
      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));

      left_boxes = hypre_BoxArrayCreate(1, ndim);
      done_boxes = hypre_BoxArrayCreate(2, ndim);
      temp_boxes = hypre_BoxArrayCreate(0, ndim);

      /* done_box always points to the first box in done_boxes */
      done_box = hypre_BoxArrayBox(done_boxes, 0);
      /* int_box always points to the second box in done_boxes */
      int_box = hypre_BoxArrayBox(done_boxes, 1);

      /* left_boxes starts as the portion of set_box not covered by the
       * grid, i.e. the ghost-only region still to be handled */
      hypre_CopyBox(set_box, hypre_BoxArrayBox(left_boxes, 0));
      hypre_BoxArraySetSize(left_boxes, 1);
      hypre_SubtractBoxArrays(left_boxes, grid_boxes, temp_boxes);

      hypre_BoxArraySetSize(done_boxes, 0);
      hypre_ForBoxI(i, grid_boxes)
      {
         /* remove what the previous grid box already covered so every
          * ghost point is visited through exactly one box */
         hypre_SubtractBoxArrays(left_boxes, done_boxes, temp_boxes);
         hypre_BoxArraySetSize(done_boxes, 1);
         /* done_box = grid box i grown into its ghost layer */
         hypre_CopyBox(hypre_BoxArrayBox(grid_boxes, i), done_box);
         hypre_BoxGrowByIndex(done_box, varoffset);
         hypre_ForBoxI(j, left_boxes)
         {
            left_box = hypre_BoxArrayBox(left_boxes, j);
            hypre_IntersectBoxes(left_box, done_box, int_box);
            hypre_StructMatrixSetBoxValues(smatrix, int_box, value_box,
                                           nentries, sentries,
                                           values, action, i, 1);
         }
      }

      hypre_BoxArrayDestroy(left_boxes);
      hypre_BoxArrayDestroy(done_boxes);
      hypre_BoxArrayDestroy(temp_boxes);
   }
   else
   {
      /* Set: clear ghost copies over the parts of set_box lying outside
       * each grid box */
      hypre_BoxArray  *diff_boxes;
      hypre_Box       *grid_box, *diff_box;

      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
      diff_boxes = hypre_BoxArrayCreate(0, ndim);

      hypre_ForBoxI(i, grid_boxes)
      {
         grid_box = hypre_BoxArrayBox(grid_boxes, i);
         hypre_BoxArraySetSize(diff_boxes, 0);
         hypre_SubtractBoxes(set_box, grid_box, diff_boxes);

         hypre_ForBoxI(j, diff_boxes)
         {
            diff_box = hypre_BoxArrayBox(diff_boxes, j);
            hypre_StructMatrixClearBoxValues(smatrix, diff_box, nentries, sentries,
                                             i, 1);
         }
      }
      hypre_BoxArrayDestroy(diff_boxes);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Fold ghost-zone contributions produced by AddTo calls back into the
 * owning processor's data via a halo exchange over every struct matrix.
 * Idempotent per assembly: the Accumulated flag makes repeat calls no-ops.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixAccumulate( hypre_SStructPMatrix *pmatrix )
{
   hypre_SStructPGrid    *pgrid    = hypre_SStructPMatrixPGrid(pmatrix);
   HYPRE_Int              nvars    = hypre_SStructPMatrixNVars(pmatrix);
   HYPRE_Int              ndim     = hypre_SStructPGridNDim(pgrid);
   HYPRE_SStructVariable *vartypes = hypre_SStructPGridVarTypes(pgrid);

   hypre_StructMatrix    *smatrix;
   hypre_Index            varoffset;
   HYPRE_Int              num_ghost[2 * HYPRE_MAXDIM];
   hypre_StructGrid      *sgrid;
   HYPRE_Int              vi, vj, d;

   hypre_CommInfo        *comm_info;
   hypre_CommPkg         *comm_pkg;
   hypre_CommHandle      *comm_handle;

   /* if values already accumulated, just return */
   if (hypre_SStructPMatrixAccumulated(pmatrix))
   {
      return hypre_error_flag;
   }

   for (vi = 0; vi < nvars; vi++)
   {
      for (vj = 0; vj < nvars; vj++)
      {
         smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
         if (smatrix != NULL)
         {
            sgrid = hypre_StructMatrixGrid(smatrix);
            /* assumes vi and vj vartypes are the same */
            hypre_SStructVariableGetOffset(vartypes[vi], ndim, varoffset);
            /* ghost width per direction taken from the variable offset */
            for (d = 0; d < ndim; d++)
            {
               num_ghost[2 * d] = num_ghost[2 * d + 1] = hypre_IndexD(varoffset, d);
            }

            /* accumulate values from AddTo */
            hypre_CreateCommInfoFromNumGhost(sgrid, num_ghost, &comm_info);
            hypre_CommPkgCreate(comm_info,
                                hypre_StructMatrixDataSpace(smatrix),
                                hypre_StructMatrixDataSpace(smatrix),
                                hypre_StructMatrixNumValues(smatrix), NULL, 1,
                                hypre_StructMatrixComm(smatrix),
                                &comm_pkg);
            /* NOTE(review): flag args (1, 0) presumably select reverse/
             * accumulating communication -- confirm against
             * hypre_InitializeCommunication's signature */
            hypre_InitializeCommunication(comm_pkg,
                                          hypre_StructMatrixData(smatrix),
                                          hypre_StructMatrixData(smatrix),
                                          1, 0, &comm_handle);
            hypre_FinalizeCommunication(comm_handle);

            hypre_CommInfoDestroy(comm_info);
            hypre_CommPkgDestroy(comm_pkg);
         }
      }
   }

   hypre_SStructPMatrixAccumulated(pmatrix) = 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Finalize the part matrix: flush outstanding AddTo contributions with
 * hypre_SStructPMatrixAccumulate, then clear ghost values and assemble
 * every underlying struct matrix.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixAssemble( hypre_SStructPMatrix *pmatrix )
{
   HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
   HYPRE_Int vi, vj;

   hypre_SStructPMatrixAccumulate(pmatrix);

   for (vi = 0; vi < nvars; vi++)
   {
      for (vj = 0; vj < nvars; vj++)
      {
         hypre_StructMatrix *smatrix =
            hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);

         if (smatrix == NULL)
         {
            continue;   /* no coupling between vi and vj */
         }

         /* ghost data has been folded in above; clear it before assembly */
         hypre_StructMatrixClearGhostValues(smatrix);
         hypre_StructMatrixAssemble(smatrix);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Set the symmetry flag for the (var, to_var) coupling of the part matrix.
 * Passing -1 for 'var' and/or 'to_var' applies the flag to all variables
 * in that slot.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetSymmetric( hypre_SStructPMatrix *pmatrix,
                                  HYPRE_Int             var,
                                  HYPRE_Int             to_var,
                                  HYPRE_Int             symmetric )
{
   HYPRE_Int **pmsymmetric = hypre_SStructPMatrixSymmetric(pmatrix);

   HYPRE_Int vstart = var;
   HYPRE_Int vsize  = 1;
   HYPRE_Int tstart = to_var;
   HYPRE_Int tsize  = 1;
   HYPRE_Int v, t;

   if (var == -1)
   {
      vstart = 0;
      vsize  = hypre_SStructPMatrixNVars(pmatrix);
   }
   if (to_var == -1)
   {
      tstart = 0;
      tsize  = hypre_SStructPMatrixNVars(pmatrix);
   }

   /* Bug fix: the loops previously ran while (v < vsize) / (t < tsize).
    * With vstart = var and vsize = 1, the loop body never executed for
    * var > 0 (similarly for to_var), silently dropping the flag.  The
    * upper bound must be start + count; behavior for var/to_var in
    * {-1, 0} is unchanged. */
   for (v = vstart; v < vstart + vsize; v++)
   {
      for (t = tstart; t < tstart + tsize; t++)
      {
         pmsymmetric[v][t] = symmetric;
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Print each non-empty struct matrix block of 'pmatrix' to the file
 * "<filename>.<vi>.<vj>" (two-digit variable indices).  'all' is forwarded
 * to hypre_StructMatrixPrint.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixPrint( const char           *filename,
                           hypre_SStructPMatrix *pmatrix,
                           HYPRE_Int             all )
{
   HYPRE_Int nvars = hypre_SStructPMatrixNVars(pmatrix);
   HYPRE_Int vi, vj;
   char      new_filename[255];

   for (vi = 0; vi < nvars; vi++)
   {
      for (vj = 0; vj < nvars; vj++)
      {
         hypre_StructMatrix *smatrix =
            hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);

         if (smatrix == NULL)
         {
            continue;   /* empty coupling: nothing to print */
         }

         hypre_sprintf(new_filename, "%s.%02d.%02d", filename, vi, vj);
         hypre_StructMatrixPrint(new_filename, smatrix, all);
      }
   }

   return hypre_error_flag;
}
/*==========================================================================
* SStructUMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Initialize the unstructured (IJ) part of an SStruct matrix: estimate a
 * per-row nonzero count for every grid point, add the extra couplings
 * described by the graph's UVEntries, hand the row sizes to the IJ matrix,
 * and allocate the host/device scratch arrays used by UMatrixSetValues.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixInitialize( hypre_SStructMatrix *matrix )
{
   HYPRE_Int               ndim        = hypre_SStructMatrixNDim(matrix);
   HYPRE_IJMatrix          ijmatrix    = hypre_SStructMatrixIJMatrix(matrix);
   HYPRE_Int               matrix_type = hypre_SStructMatrixObjectType(matrix);
   hypre_SStructGraph     *graph       = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid      *grid        = hypre_SStructGraphGrid(graph);
   HYPRE_Int               nparts      = hypre_SStructGraphNParts(graph);
   hypre_SStructPGrid    **pgrids      = hypre_SStructGraphPGrids(graph);
   hypre_SStructStencil ***stencils    = hypre_SStructGraphStencils(graph);
   HYPRE_Int               nUventries  = hypre_SStructGraphNUVEntries(graph);
   HYPRE_Int              *iUventries  = hypre_SStructGraphIUVEntries(graph);
   hypre_SStructUVEntry  **Uventries   = hypre_SStructGraphUVEntries(graph);
   HYPRE_Int             **nvneighbors = hypre_SStructGridNVNeighbors(grid);
   hypre_StructGrid       *sgrid;
   hypre_SStructStencil   *stencil;
   HYPRE_Int              *split;
   HYPRE_Int               nvars;
   HYPRE_Int               nrows, rowstart, nnzs ;
   HYPRE_Int               part, var, entry, b, m, mi;
   HYPRE_Int              *row_sizes;
   HYPRE_Int               max_row_size;

   hypre_BoxArray         *boxes;
   hypre_Box              *box;
   hypre_Box              *ghost_box;
   hypre_IndexRef          start;
   hypre_Index             loop_size, stride;

   HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR);
#ifdef HYPRE_USING_OPENMP
   HYPRE_IJMatrixSetOMPFlag(ijmatrix, 1); /* Use OpenMP */
#endif

   /* for SSTRUCT/STRUCT object types the row range includes ghost rows */
   if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT)
   {
      rowstart = hypre_SStructGridGhstartRank(grid);
      nrows = hypre_SStructGridGhlocalSize(grid) ;
   }
   else /* matrix_type == HYPRE_PARCSR */
   {
      rowstart = hypre_SStructGridStartRank(grid);
      nrows = hypre_SStructGridLocalSize(grid);
   }

   /* set row sizes */
   m = 0;
   max_row_size = 0;
   ghost_box = hypre_BoxCreate(ndim);
   row_sizes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
   hypre_SetIndex(stride, 1);
   for (part = 0; part < nparts; part++)
   {
      nvars = hypre_SStructPGridNVars(pgrids[part]);
      for (var = 0; var < nvars; var++)
      {
         sgrid = hypre_SStructPGridSGrid(pgrids[part], var);

         stencil = stencils[part][var];
         split = hypre_SStructMatrixSplit(matrix, part, var);
         nnzs = 0;
         /* count stencil entries assigned to the U matrix; split[entry]
          * == -1 marks such entries (inferred from this use -- confirm) */
         for (entry = 0; entry < hypre_SStructStencilSize(stencil); entry++)
         {
            if (split[entry] == -1)
            {
               nnzs++;
            }
         }
#if 0
         /* TODO: For now, assume stencil is full/complete */
         if (hypre_SStructMatrixSymmetric(matrix))
         {
            nnzs = 2 * nnzs - 1;
         }
#endif
         boxes = hypre_StructGridBoxes(sgrid);
         hypre_ForBoxI(b, boxes)
         {
            box = hypre_BoxArrayBox(boxes, b);
            /* rows are laid out over the ghost-grown box for SSTRUCT/
             * STRUCT object types */
            hypre_CopyBox(box, ghost_box);
            if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT)
            {
               hypre_BoxGrowByArray(ghost_box, hypre_StructGridNumGhost(sgrid));
            }
            start = hypre_BoxIMin(box);
            hypre_BoxGetSize(box, loop_size);
            /* stamp the stencil-based nnz estimate on every row of the box */
            zypre_BoxLoop1Begin(hypre_SStructMatrixNDim(matrix), loop_size,
                                ghost_box, start, stride, mi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi) HYPRE_SMP_SCHEDULE
#endif
            zypre_BoxLoop1For(mi)
            {
               row_sizes[m + mi] = nnzs;
            }
            zypre_BoxLoop1End(mi);

            m += hypre_BoxVolume(ghost_box);
         }

         max_row_size = hypre_max(max_row_size, nnzs);
         /* rows near part boundaries with neighbor couplings may need the
          * full stencil width */
         if (nvneighbors[part][var])
         {
            max_row_size =
               hypre_max(max_row_size, hypre_SStructStencilSize(stencil));
         }
      }
   }
   hypre_BoxDestroy(ghost_box);

   /* GEC0902 essentially for each UVentry we figure out how many extra
    * columns we need to add to the rowsizes */

   /* RDF: THREAD? */
   for (entry = 0; entry < nUventries; entry++)
   {
      mi = iUventries[entry];
      m = hypre_SStructUVEntryRank(Uventries[mi]) - rowstart;
      /* only rows owned locally (within [0, nrows)) are updated */
      if ((m > -1) && (m < nrows))
      {
         row_sizes[m] += hypre_SStructUVEntryNUEntries(Uventries[mi]);
         max_row_size = hypre_max(max_row_size, row_sizes[m]);
      }
   }

   /* ZTODO: Update row_sizes based on neighbor off-part couplings */
   HYPRE_IJMatrixSetRowSizes (ijmatrix, (const HYPRE_Int *) row_sizes);

   hypre_TFree(row_sizes, HYPRE_MEMORY_HOST);

   /* scratch buffers sized by the widest possible row; consumed by
    * hypre_SStructUMatrixSetValues (host and device variants) */
   hypre_SStructMatrixTmpSize(matrix) = max_row_size;
   hypre_SStructMatrixTmpRowCoords(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size,
                                                           HYPRE_MEMORY_HOST);
   hypre_SStructMatrixTmpColCoords(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size,
                                                           HYPRE_MEMORY_HOST);
   hypre_SStructMatrixTmpCoeffs(matrix) = hypre_CTAlloc(HYPRE_Complex, max_row_size,
                                                        HYPRE_MEMORY_HOST);
   hypre_SStructMatrixTmpRowCoordsDevice(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size,
                                                                 HYPRE_MEMORY_DEVICE);
   hypre_SStructMatrixTmpColCoordsDevice(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size,
                                                                 HYPRE_MEMORY_DEVICE);
   hypre_SStructMatrixTmpCoeffsDevice(matrix) = hypre_CTAlloc(HYPRE_Complex, max_row_size,
                                                              HYPRE_MEMORY_DEVICE);

   HYPRE_IJMatrixInitialize(ijmatrix);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* 9/09 - AB: modified to use the box manager - here we need to check the
* neighbor box manager also
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Set/add/get coefficients of the unstructured (IJ) matrix for one grid
 * point 'index' of variable 'var' on 'part'.  Stencil entry numbers below
 * the stencil size address stencil couplings; larger numbers address the
 * graph's non-stencil (UVEntry) couplings.
 * (action > 0): add-to values
 * (action = 0): set values
 * (action < 0): get values
 *
 * 9/09 - AB: modified to use the box manager - here we need to check the
 * neighbor box manager also
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixSetValues( hypre_SStructMatrix *matrix,
                               HYPRE_Int            part,
                               hypre_Index          index,
                               HYPRE_Int            var,
                               HYPRE_Int            nentries,
                               HYPRE_Int           *entries,
                               HYPRE_Complex       *values,
                               HYPRE_Int            action )
{
   HYPRE_Int                ndim     = hypre_SStructMatrixNDim(matrix);
   HYPRE_IJMatrix           ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
   hypre_SStructGraph      *graph    = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid       *grid     = hypre_SStructGraphGrid(graph);
   hypre_SStructGrid       *dom_grid = hypre_SStructGraphDomainGrid(graph);
   hypre_SStructStencil    *stencil  = hypre_SStructGraphStencil(graph, part, var);
   HYPRE_Int               *vars     = hypre_SStructStencilVars(stencil);
   hypre_Index             *shape    = hypre_SStructStencilShape(stencil);
   HYPRE_Int                size     = hypre_SStructStencilSize(stencil);
   hypre_IndexRef           offset;
   hypre_Index              to_index;
   hypre_SStructUVEntry    *Uventry;
   hypre_BoxManEntry       *boxman_entry;
   hypre_SStructBoxManInfo *entry_info;
   HYPRE_BigInt             row_coord;
   HYPRE_BigInt            *col_coords;
   HYPRE_Int                ncoeffs;
   HYPRE_Complex           *coeffs;
   HYPRE_Int                i, entry;
   HYPRE_BigInt             Uverank;
   HYPRE_Int                matrix_type = hypre_SStructMatrixObjectType(matrix);
   HYPRE_Complex           *h_values;

   /* locate the box-manager entry that owns 'index' on this part */
   hypre_SStructGridFindBoxManEntry(grid, part, index, var, &boxman_entry);

   /* if not local, check neighbors */
   if (boxman_entry == NULL)
   {
      hypre_SStructGridFindNborBoxManEntry(grid, part, index, var, &boxman_entry);
   }

   if (boxman_entry == NULL)
   {
      /* index not found anywhere: flag the (matrix, part, index) args */
      hypre_error_in_arg(1);
      hypre_error_in_arg(2);
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   else
   {
      hypre_BoxManEntryGetInfo(boxman_entry, (void **) &entry_info);
   }

   /* global row number of (part, index, var) under this object type */
   hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index,
                                         &row_coord, matrix_type);

   /* scratch buffers allocated in hypre_SStructUMatrixInitialize */
   col_coords = hypre_SStructMatrixTmpColCoords(matrix);
   coeffs = hypre_SStructMatrixTmpCoeffs(matrix);

   /* RL: copy values to host since the following for-loop is on CPU */
   if ( hypre_GetActualMemLocation(HYPRE_MEMORY_DEVICE) != hypre_MEMORY_HOST )
   {
      h_values = hypre_TAlloc(HYPRE_Complex, nentries, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(h_values, values, HYPRE_Complex, nentries, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   }
   else
   {
      h_values = values;
   }

   /* RL: TODO Port it to GPU? */
   /* translate each entry into a global column rank and pack (column,
    * coefficient) pairs; entries that cannot be resolved are skipped */
   ncoeffs = 0;
   for (i = 0; i < nentries; i++)
   {
      entry = entries[i];

      if (entry < size)
      {
         /* stencil entries: the column is the point at index + offset in
          * the entry's to-variable on the domain grid */
         offset = shape[entry];
         hypre_AddIndexes(index, offset, ndim, to_index);

         hypre_SStructGridFindBoxManEntry(dom_grid, part, to_index, vars[entry],
                                          &boxman_entry);

         /* if not local, check neighbors */
         if (boxman_entry == NULL)
         {
            hypre_SStructGridFindNborBoxManEntry(dom_grid, part, to_index,
                                                 vars[entry], &boxman_entry);
         }

         if (boxman_entry != NULL)
         {
            hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, to_index,
                                                  &col_coords[ncoeffs], matrix_type);

            coeffs[ncoeffs] = h_values[i];
            ncoeffs++;
         }
      }
      else
      {
         /* non-stencil entries: look up the graph UVEntry for this point
          * and take the pre-computed column rank */
         entry -= size;
         hypre_SStructGraphGetUVEntryRank(graph, part, var, index, &Uverank);

         if (Uverank > -1)
         {
            Uventry = hypre_SStructGraphUVEntry(graph, Uverank);
            col_coords[ncoeffs] = hypre_SStructUVEntryToRank(Uventry, entry);
            coeffs[ncoeffs] = h_values[i];
            ncoeffs++;
         }
      }
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* device path: mirror the packed row into device buffers and call the
    * IJ interface with device pointers */
   HYPRE_BigInt *d_row_coords = hypre_SStructMatrixTmpRowCoordsDevice(matrix);
   HYPRE_BigInt *d_col_coords = hypre_SStructMatrixTmpColCoordsDevice(matrix);
   HYPRE_Complex *d_coeffs = hypre_SStructMatrixTmpCoeffsDevice(matrix);

   if ( hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(ijmatrix)) == HYPRE_EXEC_DEVICE )
   {
      /* one row, repeated for every coefficient */
      hypreDevice_BigIntFilln(d_row_coords, ncoeffs, row_coord);

      hypre_TMemcpy(d_col_coords, col_coords, HYPRE_BigInt, ncoeffs, HYPRE_MEMORY_DEVICE,
                    HYPRE_MEMORY_HOST);

      hypre_TMemcpy(d_coeffs, coeffs, HYPRE_Complex, ncoeffs, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);

      if (action > 0)
      {
         HYPRE_IJMatrixAddToValues(ijmatrix, ncoeffs, NULL, d_row_coords,
                                   (const HYPRE_BigInt *) d_col_coords,
                                   (const HYPRE_Complex *) d_coeffs);
      }
      else if (action > -1)
      {
         HYPRE_IJMatrixSetValues(ijmatrix, ncoeffs, NULL, d_row_coords,
                                 (const HYPRE_BigInt *) d_col_coords,
                                 (const HYPRE_Complex *) d_coeffs);
      }
      else
      {
         // RL:TODO
         /* Get still uses the host-side call/buffers here */
         HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord,
                                 col_coords, values);
      }
   }
   else
#endif
   {
      if (action > 0)
      {
         HYPRE_IJMatrixAddToValues(ijmatrix, 1, &ncoeffs, &row_coord,
                                   (const HYPRE_BigInt *) col_coords,
                                   (const HYPRE_Complex *) coeffs);
      }
      else if (action > -1)
      {
         HYPRE_IJMatrixSetValues(ijmatrix, 1, &ncoeffs, &row_coord,
                                 (const HYPRE_BigInt *) col_coords,
                                 (const HYPRE_Complex *) coeffs);
      }
      else
      {
         HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord,
                                 col_coords, values);
      }
   }

   /* release the host shadow copy if one was made */
   if (h_values != values)
   {
      hypre_TFree(h_values, HYPRE_MEMORY_HOST);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Note: Entries must all be of type stencil or non-stencil, but not both.
*
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* 9/09 - AB: modified to use the box manager- here we need to check the
* neighbor box manager also
*
* To illustrate what is computed below before calling IJSetValues2(), consider
* the following example of a 5-pt stencil (c,w,e,s,n) on a 3x2 grid (the 'x' in
* arrays 'cols' and 'ijvalues' indicates "no data"):
*
* nrows = 6
* ncols = 3 4 3 3 4 3
* rows = 0 1 2 3 4 5
* row_indexes = 0 5 10 15 20 25
* cols = . . . x x . . . . x . . . x x . . . x x . . . . x . . . x x
* ijvalues = . . . x x . . . . x . . . x x . . . x x . . . . x . . . x x
* entry = c e n c w e n c w n c e s c w e s c w s
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixSetBoxValues( hypre_SStructMatrix *matrix,
                                  HYPRE_Int            part,
                                  hypre_Box           *set_box,
                                  HYPRE_Int            var,
                                  HYPRE_Int            nentries,
                                  HYPRE_Int           *entries,
                                  hypre_Box           *value_box,
                                  HYPRE_Complex       *values,
                                  HYPRE_Int            action )
{
   HYPRE_Int             ndim     = hypre_SStructMatrixNDim(matrix);
   HYPRE_IJMatrix        ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
   hypre_SStructGraph   *graph    = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid    *grid     = hypre_SStructGraphGrid(graph);
   hypre_SStructGrid    *dom_grid = hypre_SStructGraphDomainGrid(graph);
   hypre_SStructStencil *stencil  = hypre_SStructGraphStencil(graph, part, var);
   HYPRE_Int            *vars     = hypre_SStructStencilVars(stencil);
   hypre_Index          *shape    = hypre_SStructStencilShape(stencil);
   HYPRE_Int             size     = hypre_SStructStencilSize(stencil);
   hypre_IndexRef        offset;
   hypre_BoxManEntry   **boxman_entries;
   HYPRE_Int             nboxman_entries;
   hypre_BoxManEntry   **boxman_to_entries;
   HYPRE_Int             nboxman_to_entries;
   HYPRE_Int             nrows;
   HYPRE_Int            *ncols, *row_indexes;; /* NOTE(review): stray second ';' (harmless) */
   HYPRE_BigInt         *rows, *cols;
   HYPRE_Complex        *ijvalues;
   hypre_Box            *box;
   hypre_Box            *to_box;
   hypre_Box            *map_box;
   hypre_Box            *int_box;
   hypre_Index           index, stride, loop_size;
   hypre_IndexRef        start;
   hypre_Index           rs, cs;
   HYPRE_BigInt          row_base, col_base;
   HYPRE_Int             ei, entry, ii, jj;
   HYPRE_Int             matrix_type = hypre_SStructMatrixObjectType(matrix);

   box = hypre_BoxCreate(ndim);

   /*------------------------------------------
    * all stencil entries
    *------------------------------------------*/

   if (entries[0] < size)
   {
      to_box  = hypre_BoxCreate(ndim);
      map_box = hypre_BoxCreate(ndim);
      int_box = hypre_BoxCreate(ndim);

      /* Worst-case sizing: one row per index of set_box, up to nentries
       * coefficients per row. */
      nrows       = hypre_BoxVolume(set_box);
      ncols       = hypre_CTAlloc(HYPRE_Int,     nrows, HYPRE_MEMORY_DEVICE);
      rows        = hypre_CTAlloc(HYPRE_BigInt,  nrows, HYPRE_MEMORY_DEVICE);
      row_indexes = hypre_CTAlloc(HYPRE_Int,     nrows, HYPRE_MEMORY_DEVICE);
      cols        = hypre_CTAlloc(HYPRE_BigInt,  nrows * nentries, HYPRE_MEMORY_DEVICE);
      ijvalues    = hypre_CTAlloc(HYPRE_Complex, nrows * nentries, HYPRE_MEMORY_DEVICE);

      hypre_SetIndex(stride, 1);

      hypre_SStructGridIntersect(grid, part, var, set_box, -1,
                                 &boxman_entries, &nboxman_entries);

      for (ii = 0; ii < nboxman_entries; ii++)
      {
         /* rs = strides that map an index to a global row rank */
         hypre_SStructBoxManEntryGetStrides(boxman_entries[ii], rs, matrix_type);

         /* Restrict set_box to the extents of this boxman entry. */
         hypre_CopyBox(set_box, box);
         hypre_BoxManEntryGetExtents(boxman_entries[ii],
                                     hypre_BoxIMin(map_box), hypre_BoxIMax(map_box));
         hypre_IntersectBoxes(box, map_box, int_box);
         hypre_CopyBox(int_box, box);

         /* For each index in 'box', compute a row of length <= nentries and
          * insert it into an nentries-length segment of 'cols' and 'ijvalues'.
          * This may result in gaps, but IJSetValues2() is designed for that. */

         nrows = hypre_BoxVolume(box);

#undef DEVICE_VAR
#define DEVICE_VAR is_device_ptr(ncols,row_indexes)
         hypre_LoopBegin(nrows, i)
         {
            ncols[i] = 0;
            row_indexes[i] = i * nentries;
         }
         hypre_LoopEnd()
#undef DEVICE_VAR
#define DEVICE_VAR

         for (ei = 0; ei < nentries; ei++)
         {
            entry = entries[ei];

            /* to_box = box shifted by this stencil entry's offset; its
             * intersections with the domain grid yield the column ranks. */
            hypre_CopyBox(box, to_box);
            offset = shape[entry];
            hypre_BoxShiftPos(to_box, offset);

            hypre_SStructGridIntersect(dom_grid, part, vars[entry], to_box, -1,
                                       &boxman_to_entries, &nboxman_to_entries);

            for (jj = 0; jj < nboxman_to_entries; jj++)
            {
               /* cs = strides that map an index to a global column rank */
               hypre_SStructBoxManEntryGetStrides(boxman_to_entries[jj], cs, matrix_type);

               hypre_BoxManEntryGetExtents(boxman_to_entries[jj],
                                           hypre_BoxIMin(map_box), hypre_BoxIMax(map_box));
               hypre_IntersectBoxes(to_box, map_box, int_box);

               /* col_base = global rank of the first "to" index */
               hypre_CopyIndex(hypre_BoxIMin(int_box), index);
               hypre_SStructBoxManEntryGetGlobalRank(boxman_to_entries[jj],
                                                     index, &col_base, matrix_type);

               /* shift back; row_base = global rank of the first "from" index */
               hypre_BoxShiftNeg(int_box, offset);
               hypre_CopyIndex(hypre_BoxIMin(int_box), index);
               hypre_SStructBoxManEntryGetGlobalRank(boxman_entries[ii],
                                                     index, &row_base, matrix_type);

               start = hypre_BoxIMin(int_box);
               hypre_BoxGetSize(int_box, loop_size);

#if defined(HYPRE_USING_GPU)
               /* Scalarize the strides so the device loop body does not
                * read the host-side hypre_Index arrays rs/cs. */
               hypre_assert(ndim <= 3);

               HYPRE_Int rs_0, rs_1, rs_2;
               HYPRE_Int cs_0, cs_1, cs_2;

               if (ndim > 0)
               {
                  rs_0 = rs[0];
                  cs_0 = cs[0];
               }

               if (ndim > 1)
               {
                  rs_1 = rs[1];
                  cs_1 = cs[1];
               }

               if (ndim > 2)
               {
                  rs_2 = rs[2];
                  cs_2 = cs[2];
               }
#endif

#undef DEVICE_VAR
#define DEVICE_VAR is_device_ptr(ncols,rows,cols,ijvalues,values)
               hypre_BoxLoop2Begin(ndim, loop_size,
                                   box, start, stride, mi,
                                   value_box, start, stride, vi);
               {
                  hypre_Index index; /* loop-local; shadows the outer 'index' */
                  HYPRE_Int   ci;

                  hypre_BoxLoopGetIndex(index);

                  /* ci = next free coefficient slot in row mi's segment */
                  ci = mi * nentries + ncols[mi];
                  rows[mi] = row_base;
                  cols[ci] = col_base;

#if defined(HYPRE_USING_GPU)
                  if (ndim > 0)
                  {
                     rows[mi] += index[0] * rs_0;
                     cols[ci] += index[0] * cs_0;
                  }

                  if (ndim > 1)
                  {
                     rows[mi] += index[1] * rs_1;
                     cols[ci] += index[1] * cs_1;
                  }

                  if (ndim > 2)
                  {
                     rows[mi] += index[2] * rs_2;
                     cols[ci] += index[2] * cs_2;
                  }
#else
                  HYPRE_Int d;
                  for (d = 0; d < ndim; d++)
                  {
                     rows[mi] += index[d] * rs[d];
                     cols[ci] += index[d] * cs[d];
                  }
#endif

                  ijvalues[ci] = values[ei + vi * nentries];
                  ncols[mi]++;
               }
               hypre_BoxLoop2End(mi, vi);
#undef DEVICE_VAR
#define DEVICE_VAR

            } /* end loop through boxman to entries */

            hypre_TFree(boxman_to_entries, HYPRE_MEMORY_HOST);

         } /* end of ei nentries loop */

         /* Hand the assembled (possibly gappy) rows to the IJ interface:
          * action > 0 adds, action == 0 sets, action < 0 gets. */
         if (action > 0)
         {
            HYPRE_IJMatrixAddToValues2(ijmatrix, nrows, ncols,
                                       (const HYPRE_BigInt *) rows,
                                       (const HYPRE_Int *) row_indexes,
                                       (const HYPRE_BigInt *) cols,
                                       (const HYPRE_Complex *) ijvalues);
         }
         else if (action > -1)
         {
            HYPRE_IJMatrixSetValues2(ijmatrix, nrows, ncols,
                                     (const HYPRE_BigInt *) rows,
                                     (const HYPRE_Int *) row_indexes,
                                     (const HYPRE_BigInt *) cols,
                                     (const HYPRE_Complex *) ijvalues);
         }
         else
         {
            HYPRE_IJMatrixGetValues(ijmatrix, nrows, ncols, rows, cols, values);
         }

      } /* end loop through boxman entries */

      hypre_TFree(boxman_entries, HYPRE_MEMORY_HOST);

      hypre_TFree(ncols, HYPRE_MEMORY_DEVICE);
      hypre_TFree(rows, HYPRE_MEMORY_DEVICE);
      hypre_TFree(row_indexes, HYPRE_MEMORY_DEVICE);
      hypre_TFree(cols, HYPRE_MEMORY_DEVICE);
      hypre_TFree(ijvalues, HYPRE_MEMORY_DEVICE);

      hypre_BoxDestroy(to_box);
      hypre_BoxDestroy(map_box);
      hypre_BoxDestroy(int_box);
   }

   /*------------------------------------------
    * non-stencil entries
    *------------------------------------------*/

   else
   {
      /* RDF: THREAD (Check safety on UMatrixSetValues call) */

      /* Fall back to the point-wise setter, one index at a time. */
      hypre_BoxGetSize(set_box, loop_size);

      hypre_SerialBoxLoop0Begin(ndim, loop_size);
      {
         zypre_BoxLoopGetIndex(index);
         hypre_AddIndexes(index, hypre_BoxIMin(set_box), ndim, index);
         hypre_SStructUMatrixSetValues(matrix, part, index, var,
                                       nentries, entries, values, action);
         values += nentries;
      }
      hypre_SerialBoxLoop0End();
   }

   hypre_BoxDestroy(box);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixAssemble( hypre_SStructMatrix *matrix )
{
   HYPRE_IJMatrix ij = hypre_SStructMatrixIJMatrix(matrix);

   /* Flush all pending set/add-to calls into the IJ matrix. */
   HYPRE_IJMatrixAssemble(ij);

   /* Cache the assembled ParCSR object inside the SStruct matrix. */
   HYPRE_IJMatrixGetObject(ij,
                           (void **) &hypre_SStructMatrixParCSRMatrix(matrix));

   return hypre_error_flag;
}
/*==========================================================================
* SStructMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixRef( hypre_SStructMatrix  *matrix,
                        hypre_SStructMatrix **matrix_ref )
{
   /* Hand out an aliased reference and bump the reference count. */
   *matrix_ref = matrix;
   hypre_SStructMatrixRefCount(matrix) += 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Partition 'entries' into entries handled by the structured (S) part and
 * entries handled by the unstructured (U) part of the matrix.  Output
 * arrays are the matrix's preallocated scratch SEntries/UEntries. */
HYPRE_Int
hypre_SStructMatrixSplitEntries( hypre_SStructMatrix *matrix,
                                 HYPRE_Int            part,
                                 HYPRE_Int            var,
                                 HYPRE_Int            nentries,
                                 HYPRE_Int           *entries,
                                 HYPRE_Int           *nSentries_ptr,
                                 HYPRE_Int          **Sentries_ptr,
                                 HYPRE_Int           *nUentries_ptr,
                                 HYPRE_Int          **Uentries_ptr )
{
   hypre_SStructGraph   *graph   = hypre_SStructMatrixGraph(matrix);
   HYPRE_Int            *split   = hypre_SStructMatrixSplit(matrix, part, var);
   hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var);
   HYPRE_Int             stencil_size = hypre_SStructStencilSize(stencil);
   HYPRE_Int            *Sentries = hypre_SStructMatrixSEntries(matrix);
   HYPRE_Int            *Uentries = hypre_SStructMatrixUEntries(matrix);
   HYPRE_Int             nS = 0;
   HYPRE_Int             nU = 0;
   HYPRE_Int             e, i;

   for (i = 0; i < nentries; i++)
   {
      e = entries[i];
      /* split[] is only consulted for stencil entries (short-circuit) */
      if ((e < stencil_size) && (split[e] > -1))
      {
         /* stencil entry kept in the structured part */
         Sentries[nS++] = split[e];
      }
      else
      {
         /* non-stencil entry, or stencil entry routed to the U matrix */
         Uentries[nU++] = e;
      }
   }

   *nSentries_ptr = nS;
   *Sentries_ptr  = Sentries;
   *nUentries_ptr = nU;
   *Uentries_ptr  = Uentries;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSetValues( HYPRE_SStructMatrix  matrix,
                              HYPRE_Int            part,
                              HYPRE_Int           *index,
                              HYPRE_Int            var,
                              HYPRE_Int            nentries,
                              HYPRE_Int           *entries,
                              HYPRE_Complex       *values,
                              HYPRE_Int            action )
{
   HYPRE_Int             ndim  = hypre_SStructMatrixNDim(matrix);
   hypre_SStructGraph   *graph = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid    *grid  = hypre_SStructGraphGrid(graph);
   HYPRE_Int           **nvneighbors = hypre_SStructGridNVNeighbors(grid);
   HYPRE_Int            *Sentries;
   HYPRE_Int            *Uentries;
   HYPRE_Int             nSentries;
   HYPRE_Int             nUentries;
   hypre_SStructPMatrix *pmatrix;
   hypre_Index           cindex;

   /* Route each requested entry to the structured (S) or unstructured (U)
    * part of the matrix. */
   hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries,
                                   &nSentries, &Sentries,
                                   &nUentries, &Uentries);

   hypre_CopyToCleanIndex(index, ndim, cindex);

   /* S-matrix */
   if (nSentries > 0)
   {
      pmatrix = hypre_SStructMatrixPMatrix(matrix, part);
      hypre_SStructPMatrixSetValues(pmatrix, cindex, var,
                                    nSentries, Sentries, values, action);

      /* put inter-part couplings in UMatrix and zero them out in PMatrix
       * (possibly in ghost zones) */
      if (nvneighbors[part][var] > 0)
      {
         hypre_Box  *set_box;
         HYPRE_Int   d;

         /* This creates boxes with zeroed-out extents */
         set_box = hypre_BoxCreate(ndim);

         /* Degenerate (single-index) box located at cindex. */
         for (d = 0; d < ndim; d++)
         {
            hypre_BoxIMinD(set_box, d) = cindex[d];
            hypre_BoxIMaxD(set_box, d) = cindex[d];
         }

         /* NOTE(review): the original (unsplit) 'entries' array is passed
          * here with the count nSentries - verify this is intentional. */
         hypre_SStructMatrixSetInterPartValues(matrix, part, set_box, var, nSentries, entries,
                                               set_box, values, action);

         hypre_BoxDestroy(set_box);
      }
   }

   /* U-matrix */
   if (nUentries > 0)
   {
      hypre_SStructUMatrixSetValues(matrix, part, cindex, var,
                                    nUentries, Uentries, values, action);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSetBoxValues( HYPRE_SStructMatrix  matrix,
                                 HYPRE_Int            part,
                                 hypre_Box           *set_box,
                                 HYPRE_Int            var,
                                 HYPRE_Int            nentries,
                                 HYPRE_Int           *entries,
                                 hypre_Box           *value_box,
                                 HYPRE_Complex       *values,
                                 HYPRE_Int            action )
{
   hypre_SStructGraph  *graph = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid   *grid  = hypre_SStructGraphGrid(graph);
   HYPRE_Int          **nvneighbors = hypre_SStructGridNVNeighbors(grid);
   HYPRE_Int           *Sentries, *Uentries;
   HYPRE_Int            nSentries, nUentries;

   /* Route each requested entry to the structured (S) or unstructured (U)
    * part of the matrix. */
   hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries,
                                   &nSentries, &Sentries,
                                   &nUentries, &Uentries);

   /* Structured part. */
   if (nSentries > 0)
   {
      hypre_SStructPMatrix *pmatrix = hypre_SStructMatrixPMatrix(matrix, part);

      hypre_SStructPMatrixSetBoxValues(pmatrix, set_box, var, nSentries, Sentries,
                                       value_box, values, action);

      /* Inter-part couplings go into the U matrix and are zeroed out in the
       * P matrix (possibly in ghost zones). */
      if (nvneighbors[part][var] > 0)
      {
         hypre_SStructMatrixSetInterPartValues(matrix, part, set_box, var, nSentries, entries,
                                               value_box, values, action);
      }
   }

   /* Unstructured part. */
   if (nUentries > 0)
   {
      hypre_SStructUMatrixSetBoxValues(matrix, part, set_box, var, nUentries, Uentries,
                                       value_box, values, action);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Put inter-part couplings in UMatrix and zero them out in PMatrix (possibly in
* ghost zones). Assumes that all entries are stencil entries.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructMatrixSetInterPartValues( HYPRE_SStructMatrix  matrix,
                                       HYPRE_Int            part,
                                       hypre_Box           *set_box,
                                       HYPRE_Int            var,
                                       HYPRE_Int            nentries,
                                       HYPRE_Int           *entries,
                                       hypre_Box           *value_box,
                                       HYPRE_Complex       *values,
                                       HYPRE_Int            action )
{
   HYPRE_Int                ndim  = hypre_SStructMatrixNDim(matrix);
   hypre_SStructGraph      *graph = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid       *grid  = hypre_SStructGraphGrid(graph);
   hypre_SStructPMatrix    *pmatrix;
   hypre_SStructPGrid      *pgrid;
   hypre_SStructStencil    *stencil;
   hypre_Index             *shape;
   HYPRE_Int               *smap;
   HYPRE_Int               *vars, frvartype, tovartype;
   hypre_StructMatrix      *smatrix;
   hypre_Box               *box, *ibox0, *ibox1, *tobox, *frbox;
   hypre_Index              stride, loop_size;
   hypre_IndexRef           offset, start;
   hypre_BoxManEntry      **frentries, **toentries;
   hypre_SStructBoxManInfo *frinfo, *toinfo;
   HYPRE_Complex           *tvalues = NULL;
   HYPRE_Int                tvalues_size = 0;
   HYPRE_Int                nfrentries, ntoentries, frpart, topart;
   HYPRE_Int                entry, sentry, ei, fri, toi;

   pmatrix = hypre_SStructMatrixPMatrix(matrix, part);

   pgrid = hypre_SStructPMatrixPGrid(pmatrix);
   frvartype = hypre_SStructPGridVarType(pgrid, var);

   box   = hypre_BoxCreate(ndim);
   ibox0 = hypre_BoxCreate(ndim);
   ibox1 = hypre_BoxCreate(ndim);
   tobox = hypre_BoxCreate(ndim);
   frbox = hypre_BoxCreate(ndim);

   stencil = hypre_SStructPMatrixStencil(pmatrix, var);
   smap    = hypre_SStructPMatrixSMap(pmatrix, var);
   shape   = hypre_SStructStencilShape(stencil);
   vars    = hypre_SStructStencilVars(stencil);

   hypre_SetIndex(stride, 1);

   for (ei = 0; ei < nentries; ei++)
   {
      entry   = entries[ei];
      sentry  = smap[entry];    /* corresponding struct-matrix stencil entry */
      offset  = shape[entry];
      smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entry]);
      tovartype = hypre_SStructPGridVarType(pgrid, vars[entry]);

      /* shift box in the stencil offset direction */
      hypre_CopyBox(set_box, box);

      hypre_AddIndexes(hypre_BoxIMin(box), offset, ndim, hypre_BoxIMin(box));
      hypre_AddIndexes(hypre_BoxIMax(box), offset, ndim, hypre_BoxIMax(box));

      /* get "to" entries */
      hypre_SStructGridIntersect(grid, part, vars[entry], box, -1,
                                 &toentries, &ntoentries);

      for (toi = 0; toi < ntoentries; toi++)
      {
         hypre_BoxManEntryGetExtents(
            toentries[toi], hypre_BoxIMin(tobox), hypre_BoxIMax(tobox));
         hypre_IntersectBoxes(box, tobox, ibox0);
         if (hypre_BoxVolume(ibox0))
         {
            hypre_SStructBoxManEntryGetPart(toentries[toi], part, &topart);

            /* shift ibox0 back */
            hypre_SubtractIndexes(hypre_BoxIMin(ibox0), offset, ndim,
                                  hypre_BoxIMin(ibox0));
            hypre_SubtractIndexes(hypre_BoxIMax(ibox0), offset, ndim,
                                  hypre_BoxIMax(ibox0));

            /* get "from" entries */
            hypre_SStructGridIntersect(grid, part, var, ibox0, -1,
                                       &frentries, &nfrentries);

            for (fri = 0; fri < nfrentries; fri++)
            {
               /* don't set couplings within the same part unless possibly for
                * cell data (to simplify periodic conditions for users) */
               hypre_SStructBoxManEntryGetPart(frentries[fri], part, &frpart);
               if (topart == frpart)
               {
                  if ( (frvartype != HYPRE_SSTRUCT_VARIABLE_CELL) ||
                       (tovartype != HYPRE_SSTRUCT_VARIABLE_CELL) )
                  {
                     continue;
                  }
                  /* same part, both cell-type: skip unless the two entries
                   * live in different box-manager info classes */
                  hypre_BoxManEntryGetInfo(frentries[fri], (void **) &frinfo);
                  hypre_BoxManEntryGetInfo(toentries[toi], (void **) &toinfo);
                  if ( hypre_SStructBoxManInfoType(frinfo) ==
                       hypre_SStructBoxManInfoType(toinfo) )
                  {
                     continue;
                  }
               }

               hypre_BoxManEntryGetExtents(
                  frentries[fri], hypre_BoxIMin(frbox), hypre_BoxIMax(frbox));
               hypre_IntersectBoxes(ibox0, frbox, ibox1);
               if (hypre_BoxVolume(ibox1))
               {
                  /* grow the scratch buffer to one value per index of ibox1 */
                  HYPRE_Int tvalues_new_size = hypre_BoxVolume(ibox1);
                  tvalues = hypre_TReAlloc_v2(tvalues, HYPRE_Complex, tvalues_size, HYPRE_Complex, tvalues_new_size,
                                              HYPRE_MEMORY_DEVICE);
                  tvalues_size = tvalues_new_size;

                  if (action >= 0)
                  {
                     /* set or add */

                     /* copy values into tvalues */
                     start = hypre_BoxIMin(ibox1);
                     hypre_BoxGetSize(ibox1, loop_size);
#undef DEVICE_VAR
#define DEVICE_VAR is_device_ptr(tvalues,values)
                     hypre_BoxLoop2Begin(ndim, loop_size,
                                         ibox1, start, stride, mi,
                                         value_box, start, stride, vi);
                     {
                        tvalues[mi] = values[ei + vi * nentries];
                     }
                     hypre_BoxLoop2End(mi, vi);
#undef DEVICE_VAR
#define DEVICE_VAR

                     /* put values into UMatrix */
                     hypre_SStructUMatrixSetBoxValues(
                        matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action);
                     /* zero out values in PMatrix (possibly in ghost) */
                     hypre_StructMatrixClearBoxValues(
                        smatrix, ibox1, 1, &sentry, -1, 1);
                  }
                  else
                  {
                     /* get */

                     /* get values from UMatrix */
                     hypre_SStructUMatrixSetBoxValues(
                        matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action);

                     /* copy tvalues into values */
                     start = hypre_BoxIMin(ibox1);
                     hypre_BoxGetSize(ibox1, loop_size);
#undef DEVICE_VAR
#define DEVICE_VAR is_device_ptr(tvalues,values)
                     hypre_BoxLoop2Begin(ndim, loop_size,
                                         ibox1, start, stride, mi,
                                         value_box, start, stride, vi);
                     {
                        values[ei + vi * nentries] = tvalues[mi];
                     }
                     hypre_BoxLoop2End(mi, vi);
#undef DEVICE_VAR
#define DEVICE_VAR
                  } /* end if action */
               } /* end if nonzero ibox1 */
            } /* end of "from" boxman entries loop */
            hypre_TFree(frentries, HYPRE_MEMORY_HOST);
         } /* end if nonzero ibox0 */
      } /* end of "to" boxman entries loop */
      hypre_TFree(toentries, HYPRE_MEMORY_HOST);
   } /* end of entries loop */

   hypre_BoxDestroy(box);
   hypre_BoxDestroy(ibox0);
   hypre_BoxDestroy(ibox1);
   hypre_BoxDestroy(tobox);
   hypre_BoxDestroy(frbox);
   hypre_TFree(tvalues, HYPRE_MEMORY_DEVICE);

   return hypre_error_flag;
}
|
Builder.h | /**********************************************************************************
Copyright (c) 2020 Tobias Zündorf
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**********************************************************************************/
#pragma once
#include <algorithm>
#include "../../../../DataStructures/RAPTOR/Data.h"
#include "../../../../Helpers/MultiThreading.h"
#include "../../../../Helpers/Timer.h"
#include "../../../../Helpers/Console/Progress.h"
#include "RangeSearchUsingStations.h"
namespace RAPTOR::TransferShortcuts::Preprocessing {
/** Parallel builder for RAPTOR transfer shortcuts.
 *  Runs one range search per stop across OpenMP threads, each thread working
 *  on a private copy of the shortcut graph, and merges the results into the
 *  shared graph under a critical section. */
template<bool ALLOW_REBOARDING_OF_TRIPS, bool DEBUG = false, bool PRUNE_WITH_EXISTING_SHORTCUTS = true, bool REQUIRE_DIRECT_TRANSFER = false>
class Builder {

public:
    inline static constexpr bool AllowReboardingOfTrips = ALLOW_REBOARDING_OF_TRIPS;
    inline static constexpr bool Debug = DEBUG;
    inline static constexpr bool PruneWithExistingShortcuts = PRUNE_WITH_EXISTING_SHORTCUTS;
    inline static constexpr bool RequireDirectTransfer = REQUIRE_DIRECT_TRANSFER;
    using Type = Builder<AllowReboardingOfTrips, Debug, PruneWithExistingShortcuts, RequireDirectTransfer>;

public:
    // Creates one vertex per stop and copies the stops' coordinates from the
    // network's transfer graph; the shortcut graph starts with no edges.
    Builder(const Data& data) :
        data(data) {
        shortcutGraph.addVertices(data.numberOfStops());
        for (const Vertex vertex : shortcutGraph.vertices()) {
            shortcutGraph.set(Coordinates, vertex, data.transferGraph.get(Coordinates, vertex));
        }
    }

    // Computes shortcuts for all stops within [minDepartureTime, maxDepartureTime].
    // maxInitialWalking bounds the initial walking duration (seconds) of the
    // range search. Results accumulate in shortcutGraph.
    void computeShortcuts(const ThreadPinning& threadPinning, const int maxInitialWalking = 15 * 60, const int minDepartureTime = -never, const int maxDepartureTime = never, const bool verbose = true) noexcept {
        if (verbose) std::cout << "Computing shortcuts with " << threadPinning.numberOfThreads << " threads." << std::endl;
        Progress progress(data.numberOfStops(), verbose);
        omp_set_num_threads(threadPinning.numberOfThreads);
        #pragma omp parallel
        {
            threadPinning.pinThread();

            // Per-thread working copy; the range search writes shortcuts here.
            DynamicTransferGraph localShortcutGraph = shortcutGraph;
            RangeSearchUsingStations<AllowReboardingOfTrips, PruneWithExistingShortcuts, Debug, RequireDirectTransfer> rangeSearch(data, localShortcutGraph, maxInitialWalking);

            #pragma omp for schedule(dynamic)
            for (size_t i = 0; i < data.numberOfStops(); i++) {
                rangeSearch.run(StopId(i), minDepartureTime, maxDepartureTime);
                progress++;
            }

            // Merge phase: one thread at a time copies its local edges into
            // the shared graph; a duplicate edge must agree on travel time.
            #pragma omp critical
            {
                for (const Vertex from : shortcutGraph.vertices()) {
                    for (const Edge edge : localShortcutGraph.edgesFrom(from)) {
                        const Vertex to = localShortcutGraph.get(ToVertex, edge);
                        if (!shortcutGraph.hasEdge(from, to)) {
                            shortcutGraph.addEdge(from, to).set(TravelTime, localShortcutGraph.get(TravelTime, edge));
                        } else {
                            AssertMsg(shortcutGraph.get(TravelTime, shortcutGraph.findEdge(from, to)) == localShortcutGraph.get(TravelTime, edge), "Edge from " << from << " to " << to << " has inconclusive travel time (" << shortcutGraph.get(TravelTime, shortcutGraph.findEdge(from, to)) << ", " << localShortcutGraph.get(TravelTime, edge) << ")");
                        }
                    }
                }
            }
        }
        progress.finished();
    }

    // Read-only access to the merged shortcut graph.
    inline const DynamicTransferGraph& getShortcutGraph() const noexcept {
        return shortcutGraph;
    }

    // Mutable access to the merged shortcut graph.
    inline DynamicTransferGraph& getShortcutGraph() noexcept {
        return shortcutGraph;
    }

private:
    const Data& data;              // transit network (not owned)
    DynamicTransferGraph shortcutGraph;  // shared result graph

};
}
|
VolumetricMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricMaxPooling.c"
#else
/* Forward pass over one (non-batch) frame of volumetric max pooling.
 * For every output cell, scans its kT x kH x kW input window, writes the
 * window maximum to output_p, and packs the (t, y, x) offsets of the argmax
 * into the four bytes of the matching indices element (byte 3 unused).
 *
 * Fix: mz/my/mx are now zero-initialized.  Previously they were left
 * uninitialized, so if no window element compared greater than maxval
 * (e.g. an all-NaN window, where every '>' is false), indeterminate bytes
 * were stored as indices -- undefined behavior when read in backprop. */
static void nn_(VolumetricMaxPooling_updateOutput_frame)(
  real *input_p, real *output_p, real *indz_p,
  long nslices, long itime, long iwidth, long iheight,
  long otime, long owidth, long oheight,
  int kT, int kW, int kH, int dT, int dW, int dH) {
  long k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    /* loop over output */
    long i, j, ti;
    for (ti = 0; ti < otime; ti++) {
      for (i = 0; i < oheight; i++) {
        for (j = 0; j < owidth; j++) {
          /* local pointers: window origin in the input, and the matching
           * output / indices cells */
          real *ip = input_p + k * itime * iwidth * iheight
            + ti * iwidth * iheight * dT + i * iwidth * dH + j * dW;
          real *op = output_p + k * otime * owidth * oheight
            + ti * owidth * oheight + i * owidth + j;
          real *indzp = indz_p + k * otime * owidth * oheight
            + ti * owidth * oheight + i * owidth + j;

          /* compute local max: */
          real maxval = -THInf;
          int x, y, z;
          int mx = 0, my = 0, mz = 0;  /* default argmax: window origin */

          for (z = 0; z < kT; z++) {
            for (y = 0; y < kH; y++) {
              for (x = 0; x < kW; x++) {
                real val = *(ip + z * iwidth * iheight + y * iwidth + x);
                if (val > maxval) {
                  maxval = val;
                  mz = z;
                  my = y;
                  mx = x;
                }
              }
            }
          }

          // set max values (packed as bytes: t, y, x, unused)
          ((unsigned char*)(indzp))[0] = mz;
          ((unsigned char*)(indzp))[1] = my;
          ((unsigned char*)(indzp))[2] = mx;
          ((unsigned char*)(indzp))[3] = 0;

          /* set output to local max */
          *op = maxval;
        }
      }
    }
  }
}
/* Lua entry point for the forward pass.
 * Stack: arg 1 = module table (self), arg 2 = input tensor.
 * Reads kernel (kT,kW,kH) and stride (dT,dW,dH) fields from the module,
 * resizes 'output' and 'indices', and dispatches to the frame worker
 * (per-batch-element in batch mode).  Returns 1 (number of Lua results). */
static int nn_(VolumetricMaxPooling_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  int kT = luaT_getfieldcheckint(L, 1, "kT");
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dT = luaT_getfieldcheckint(L, 1, "dT");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  long nslices;
  long itime;
  long iheight;
  long iwidth;
  long otime;
  long oheight;
  long owidth;
  real *input_data;
  real *output_data;
  real *indices_data;

  luaL_argcheck(L, input->nDimension == 4 || input->nDimension == 5, 2,
                "4D or 5D (batch-mode) tensor expected");

  /* dimension indices; shifted by one in batch (5D) mode */
  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  if (input->nDimension == 5) {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  luaL_argcheck(L, input->size[dimw] >= kW &&
                input->size[dimh] >= kH && input->size[dimt] >= kT, 2,
                "input image smaller than kernel size");

  /* sizes: valid pooling, output = floor((in - k) / d) + 1 */
  nslices = input->size[dimN];
  itime = input->size[dimt];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  otime = (itime - kT) / dT + 1;
  oheight = (iheight - kH) / dH + 1;
  owidth = (iwidth - kW) / dW + 1;

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  if (input->nDimension == 4) { /* non-batch mode */
    /* resize output */
    THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
    /* indices will contain ti,i,j uchar locations packed into float/double */
    THTensor_(resize4d)(indices, nslices, otime, oheight, owidth);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THTensor_(data)(indices);

    nn_(VolumetricMaxPooling_updateOutput_frame)(input_data, output_data,
                                                 indices_data,
                                                 nslices,
                                                 itime, iwidth, iheight,
                                                 otime, owidth, oheight,
                                                 kT, kW, kH, dT, dW, dH);
  } else { /* batch mode */
    long p;
    long nBatch = input->size[0];

    /* per-batch-element strides in flat element counts */
    long istride = nslices * itime * iwidth * iheight;
    long ostride = nslices * otime * owidth * oheight;

    /* resize output */
    THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);
    /* indices will contain ti,i,j locations for each output point */
    THTensor_(resize5d)(indices, nBatch, nslices, otime, oheight, owidth);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THTensor_(data)(indices);

#pragma omp parallel for private(p)
    for (p=0; p < nBatch; p++) {
      nn_(VolumetricMaxPooling_updateOutput_frame)(
        input_data + p * istride,
        output_data + p * ostride,
        indices_data + p * ostride,
        nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        kT, kW, kH, dT, dW, dH);
    }
  }

  /* cleanup: release the contiguous copy (or extra reference) */
  THTensor_(free)(input);
  return 1;
}
/* Backward pass over one (non-batch) frame: for each output cell, unpack
 * the argmax position stored by updateOutput_frame and accumulate the
 * output gradient into the corresponding input-gradient cell. */
static void nn_(VolumetricMaxPooling_updateGradInput_frame)(
  real *gradInput_p, real *gradOutput_p, real *indz_p,
  long nslices,
  long itime, long iwidth, long iheight,
  long otime, long owidth, long oheight,
  int dT, int dW, int dH) {
  long k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++) {
    real *gradInput_p_k = gradInput_p + k * itime * iwidth * iheight;
    real *gradOutput_p_k = gradOutput_p + k * otime * owidth * oheight;
    real *indz_p_k = indz_p + k * otime * owidth * oheight;

    /* calculate max points */
    long ti, i, j;
    for (ti = 0; ti < otime; ti++) {
      for (i = 0; i < oheight; i++) {
        for (j = 0; j < owidth; j++) {
          /* retrieve position of max: bytes 0..2 hold the (t, y, x)
           * within-window offsets; stride * output index gives the origin */
          real * indzp = &indz_p_k[ti * oheight * owidth + i * owidth + j];
          long maxti = ((unsigned char*)(indzp))[0] + ti * dT;
          long maxi = ((unsigned char*)(indzp))[1] + i * dH;
          long maxj = ((unsigned char*)(indzp))[2] + j * dW;

          /* update gradient (accumulate: windows may overlap) */
          gradInput_p_k[maxti * iheight * iwidth + maxi * iwidth + maxj] +=
            gradOutput_p_k[ti * oheight * owidth + i * owidth + j];
        }
      }
    }
  }
}
/* Lua entry point for the backward pass.
 * Stack: arg 1 = module table (self), arg 2 = input, arg 3 = gradOutput.
 * Resizes gradInput to match input, zeroes it, then scatters gradOutput
 * through the stored argmax indices.  Returns 1 (number of Lua results). */
static int nn_(VolumetricMaxPooling_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int dT = luaT_getfieldcheckint(L, 1, "dT");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
  int nslices;
  int itime;
  int iheight;
  int iwidth;
  int otime;
  int oheight;
  int owidth;
  real *gradInput_data;
  real *gradOutput_data;
  real *indices_data;

  /* dimension indices; shifted by one in batch (5D) mode */
  int dimN = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  /* NOTE(review): 'indices' is read raw below without newContiguous --
   * presumably guaranteed contiguous because updateOutput resized it;
   * confirm. */

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->nDimension == 5) {
    dimN++;
    dimt++;
    dimh++;
    dimw++;
  }

  /* sizes */
  nslices = input->size[dimN];
  itime = input->size[dimt];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  otime = gradOutput->size[dimt];
  oheight = gradOutput->size[dimh];
  owidth = gradOutput->size[dimw];

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THTensor_(data)(indices);

  /* backprop */
  if (input->nDimension == 4) { /* non-batch mode*/
    nn_(VolumetricMaxPooling_updateGradInput_frame)(
      gradInput_data, gradOutput_data,
      indices_data,
      nslices,
      itime, iwidth, iheight,
      otime, owidth, oheight,
      dT, dW, dH);
  }
  else { /* batch mode */
    long p;
    long nBatch = input->size[0];
    /* per-batch-element strides in flat element counts */
    long istride = nslices * itime * iwidth * iheight;
    long ostride = nslices * otime * owidth * oheight;

#pragma omp parallel for private(p)
    for (p = 0; p < nBatch; p++) {
      nn_(VolumetricMaxPooling_updateGradInput_frame)(
        gradInput_data + p * istride,
        gradOutput_data + p * ostride,
        indices_data + p * ostride,
        nslices,
        itime, iwidth, iheight,
        otime, owidth, oheight,
        dT, dW, dH);
    }
  }

  /* cleanup: release the contiguous copy (or extra reference) */
  THTensor_(free)(gradOutput);
  return 1;
}
/* Method table exposed to Lua. */
static const struct luaL_Reg nn_(VolumetricMaxPooling__) [] = {
  {"VolumetricMaxPooling_updateOutput", nn_(VolumetricMaxPooling_updateOutput)},
  {"VolumetricMaxPooling_updateGradInput", nn_(VolumetricMaxPooling_updateGradInput)},
  {NULL, NULL}
};

/* Registers the methods under the "nn" name on the torch tensor metatable. */
static void nn_(VolumetricMaxPooling_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(VolumetricMaxPooling__), "nn");
  lua_pop(L,1);
}
#endif
|
threading_std.h | #include <cassert>
#include <cstddef>
#include <future>
#include <type_traits>
#include <vector>
#include "thread_count.h"
#ifndef THREADING_STD_LAUNCH
#define THREADING_STD_LAUNCH async // async or deferred
#endif
namespace threading_common {
//! Tag type selecting the splitting constructor of blocked_range (TBB-compatible).
class split {};
//! Partitioner tag kept for TBB API compatibility; carries no state here.
class auto_partitioner {};
// class static_partitioner;
// class affinity_partitioner;
//! A range over which to iterate.
//! A half-open range [begin, end) over which to iterate, modeled on
//! tbb::blocked_range.
template <typename Value>
class blocked_range {
public:
    //! Type of a value
    /** Called a const_iterator for sake of algorithms that need to treat a blocked_range
        as an STL container. */
    using const_iterator = Value;
    //! Type for size of a range
    using size_type = std::size_t;
    //! Construct range over half-open interval [begin,end), with the given grainsize.
    blocked_range(Value begin_, Value end_ /*TODO , size_type grainsize_=1*/)
        : my_end(end_)
        , my_begin(begin_) //, my_grainsize(grainsize_)
    {
        // assert( my_grainsize>0 && "grainsize must be positive" );
    }
    //! Beginning of range.
    const_iterator begin() const { return my_begin; }
    //! One past last value in range.
    const_iterator end() const { return my_end; }
    //! Size of the range
    /** Unspecified if end()<begin(). */
    size_type size() const {
        assert(!(end() < begin()) && "size() unspecified if end()<begin()");
        return size_type(my_end - my_begin);
    }
    //! The grain size for this range.
    size_type grainsize() const { return 1 /*my_grainsize*/; }
    //------------------------------------------------------------------------
    // Methods that implement Range concept
    //------------------------------------------------------------------------
    //! True if range is empty.
    bool empty() const { return !(my_begin < my_end); }
    //! True if range is divisible.
    /** TBB semantics: a range is divisible only when strictly larger than its
        grainsize; splitting a range of size() == grainsize() would produce an
        empty subrange (and can loop forever in a recursive splitter).  The
        previous implementation returned size(), which wrongly reported a
        single-element range as divisible.  Unspecified if end()<begin(). */
    bool is_divisible() const { return grainsize() < size(); }
    //! Split range.
    /** The new Range *this has the second part, the old range r has the first part.
        Unspecified if end()<begin() or !is_divisible(). */
    blocked_range(blocked_range& r, split)
        : my_end(r.my_end)
        , my_begin(do_split(r, split()))
    // TODO , my_grainsize(r.my_grainsize)
    {
        // only comparison 'less than' is required from values of blocked_range objects
        assert(!(my_begin < r.my_end) && !(r.my_end < my_begin) &&
               "blocked_range has been split incorrectly");
    }

private:
    /** NOTE: my_end MUST be declared before my_begin, otherwise the splitting constructor
     * will break (my_end is read from r before do_split mutates r.my_end). */
    Value my_end;
    Value my_begin;
    // TODO size_type my_grainsize;
    //! Auxiliary function used by the splitting constructor: truncates r to its
    //! first half and returns the midpoint, which becomes the new range's begin.
    static Value do_split(blocked_range& r, split) {
        assert(r.is_divisible() && "cannot split blocked_range that is not divisible");
        Value middle = r.my_begin + (r.my_end - r.my_begin) / 2u;
        r.my_end = middle;
        return middle;
    }
};
} // namespace threading_common
namespace threading_std {
// Pull the common range/partitioner types into this backend's namespace and
// fix the std::async launch policy at compile time (async or deferred,
// selected via the THREADING_STD_LAUNCH macro above).
using std::future;
using namespace threading_common;
constexpr auto launch = std::launch::THREADING_STD_LAUNCH;
//! Runs fn(args...) under the configured launch policy and returns the future
//! holding its result.  Thin wrapper over std::async with `launch` baked in.
//! NOTE(review): std::result_of is deprecated in C++17 and removed in C++20;
//! std::invoke_result_t is the modern equivalent — left unchanged here.
template <typename Fn,
typename... Args,
typename Result = std::result_of_t<Fn && (Args && ...)>>
future<Result> async(Fn&& fn, Args&&... args) {
return std::async(launch, std::forward<Fn>(fn), std::forward<Args>(args)...);
}
class task_group {
std::vector<future<void>> threads_;
public:
template <typename F>
void run(F&& f) {
threads_.emplace_back(async(std::forward<F>(f)));
}
void cancel() { /*not implemented*/
}
void wait() { // TODO task_group_status ?
for (auto& child : this->threads_) {
child.wait();
}
}
}; // class task_group
//! Parallel iteration over range with default partitioner.
/** @ingroup algorithms **/
// template<typename Range, typename Body, typename Partitioner = auto_partitioner>
// void parallel_for( const Range& range, const Body& body, const Partitioner &p =
// Partitioner());
//! Parallel iteration over a blocked_range with default partitioner.
/** Cuts the range into at most cpu_threads() contiguous chunks of equal size
 *  (ceil division) and hands each chunk to body on its own async task, then
 *  waits for all of them.  The partitioner argument is accepted for API
 *  compatibility only.
 *  @ingroup algorithms **/
template <typename Int, typename Body, typename Partitioner = auto_partitioner>
void parallel_for(const blocked_range<Int>& range,
                  const Body& body,
                  const Partitioner& p = Partitioner()) {
    const Int n_workers = cpu_threads();
    const Int first = range.begin();
    const Int last = range.end();
    const Int chunk = (range.size() + n_workers - 1) / n_workers; // ceil

    std::vector<std::future<void>> tasks;
    tasks.reserve(n_workers);
    Int lo = first;
    for (Int w = 0; w < n_workers && lo < last; ++w, lo += chunk) {
        const auto hi = std::min(lo + chunk, last);
        // TODO grainsize?
        tasks.emplace_back(std::async(launch, body, blocked_range<Int>(lo, hi)));
    }
    for (auto& t : tasks) {
        t.wait();
    }
}
//! Parallel iteration over a range of integers with a default step value and default
//! partitioner
//! Parallel iteration over a range of integers with a default step value and
//! default partitioner: applies f to every index in [first, last) by
//! delegating to the blocked_range overload.
template <typename Index, typename Function, typename Partitioner = auto_partitioner>
void parallel_for(Index first,
                  Index last,
                  const Function& f,
                  const Partitioner& p = Partitioner()) {
    auto chunk_body = [&f](const blocked_range<Index>& chunk) {
        //#pragma ivdep
        //#pragma omp simd
        for (auto idx = chunk.begin(); idx < chunk.end(); ++idx) {
            f(idx);
        }
    };
    parallel_for(blocked_range<Index>(first, last), chunk_body, p);
}
//! Parallel iteration with reduction
/** @ingroup algorithms **/
//! Parallel iteration with reduction.
/** Splits range into at most cpu_threads() contiguous chunks; each chunk is
 *  evaluated as real_body(sub_range, identity) on its own async task, and the
 *  partial results are folded left-to-right with reduction, starting from
 *  identity.
 *  @param range     iteration space
 *  @param identity  neutral element of the reduction (0 for +, +inf for min, ...)
 *  @param real_body callable (blocked_range<Int>, Value) -> Value reducing one chunk
 *  @param reduction callable (Value, Value) -> Value combining two partials
 *  @param p         accepted for API compatibility only
 *  @ingroup algorithms **/
template <typename Int,
          typename Value,
          typename RealBody,
          typename Reduction,
          typename Partitioner = auto_partitioner>
Value parallel_reduce(const blocked_range<Int>& range,
                      const Value& identity,
                      const RealBody& real_body,
                      const Reduction& reduction,
                      const Partitioner& p = Partitioner()) {
    const size_t worker_count = cpu_threads();
    std::vector<std::future<Value>> worker_threads;
    worker_threads.reserve(worker_count);
    for (Int i = 0,
             start_entry = range.begin(),
             stop_entry = range.end(),
             stride = (range.size() + worker_count - 1) / worker_count;
         i < worker_count && start_entry < stop_entry;
         ++i, start_entry += stride) {
        const auto end_entry = std::min(start_entry + stride, stop_entry);
        // TODO grainsize?
        // Seed every chunk with the caller-supplied identity.  The previous
        // code passed a value-initialized Value{}, which silently computes a
        // wrong result whenever identity != Value{} (e.g. min-reductions
        // seeded with +infinity, or products seeded with 1).
        worker_threads.emplace_back(std::async(
            launch, real_body, blocked_range<Int>(start_entry, end_entry), identity));
    }
    Value v = identity;
    for (auto& child : worker_threads) {
        v = reduction(v, child.get());
    }
    return v;
}
} // namespace threading_std
|
TimeCluster.h | /******************************************************************************
** Copyright (c) 2015, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of SeisSol.
*
* @author Alex Breuer (breuer AT mytum.de, http://www5.in.tum.de/wiki/index.php/Dipl.-Math._Alexander_Breuer)
*
* @section LICENSE
* Copyright (c) 2013-2015, SeisSol Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* LTS cluster in SeisSol.
**/
#ifndef TIMECLUSTER_H_
#define TIMECLUSTER_H_
#ifdef USE_MPI
#include <mpi.h>
#include <list>
#endif
#include <Initializer/typedefs.hpp>
#include <SourceTerm/typedefs.hpp>
#include <utils/logger.h>
#include <Initializer/LTS.h>
#include <Initializer/tree/LTSTree.hpp>
#include <Kernels/Time.h>
#include <Kernels/Local.h>
#include <Kernels/Neighbor.h>
#include <Kernels/DynamicRupture.h>
#include <Kernels/Plasticity.h>
#include <Kernels/TimeCommon.h>
#include <Solver/FreeSurfaceIntegrator.h>
#include <Monitoring/LoopStatistics.h>
#include <Monitoring/ActorStateStatistics.h>
#include "AbstractTimeCluster.h"
#ifdef ACL_DEVICE
#include <device.h>
#include <Solver/Pipeline/DrPipeline.h>
#endif
namespace seissol {
namespace time_stepping {
class TimeCluster;
}
namespace kernels {
class ReceiverCluster;
}
}
/**
* Time cluster, which represents a collection of elements having the same time step width.
**/
/**
 * Time cluster: one collection of elements sharing a time step width, driven
 * by the AbstractTimeCluster actor state machine (predict/correct).
 **/
class seissol::time_stepping::TimeCluster : public seissol::time_stepping::AbstractTimeCluster
{
private:
// Last correction time of the neighboring cluster with higher dt
double lastSubTime;
void handleAdvancedPredictionTimeMessage(const NeighborCluster& neighborCluster) override;
void handleAdvancedCorrectionTimeMessage(const NeighborCluster& neighborCluster) override;
void start() override {}
void predict() override;
void correct() override;
//! enables the plasticity branch of the neighboring integration (template switch below)
bool usePlasticity;
//! number of time steps
unsigned long m_numberOfTimeSteps;
/*
* integrators
*/
//! time kernel
kernels::Time m_timeKernel;
//! local kernel
kernels::Local m_localKernel;
//! neighbor kernel
kernels::Neighbor m_neighborKernel;
//! dynamic rupture kernel
kernels::DynamicRupture m_dynamicRuptureKernel;
/*
* global data
*/
//! global data structures
GlobalData *m_globalDataOnHost{nullptr};
GlobalData *m_globalDataOnDevice{nullptr};
#ifdef ACL_DEVICE
device::DeviceInstance& device = device::DeviceInstance::getInstance();
dr::pipeline::DrPipeline drPipeline;
#endif
/*
* element data
*/
//! cell data of this cluster (a copy or interior layer; set by the constructor)
seissol::initializers::Layer* m_clusterData;
seissol::initializers::Layer* dynRupInteriorData;
seissol::initializers::Layer* dynRupCopyData;
seissol::initializers::LTS* m_lts;
seissol::initializers::DynamicRupture* m_dynRup;
//! Mapping of cells to point sources
sourceterm::CellToPointSourcesMapping const* m_cellToPointSources;
//! Number of mapping of cells to point sources
unsigned m_numberOfCellToPointSourcesMappings;
//! Point sources
sourceterm::PointSources const* m_pointSources;
//! work categories indexing the flop counters below
enum class ComputePart {
Local = 0,
Neighbor,
DRNeighbor,
DRFrictionLawInterior,
DRFrictionLawCopy,
PlasticityCheck,
PlasticityYield,
NUM_COMPUTE_PARTS
};
long long m_flops_nonZero[static_cast<int>(ComputePart::NUM_COMPUTE_PARTS)];
long long m_flops_hardware[static_cast<int>(ComputePart::NUM_COMPUTE_PARTS)];
//! Tv parameter for plasticity
double m_tv;
//! Relax time for plasticity
double m_oneMinusIntegratingFactor;
//! Stopwatch of TimeManager
LoopStatistics* m_loopStatistics;
ActorStateStatistics* actorStateStatistics;
unsigned m_regionComputeLocalIntegration;
unsigned m_regionComputeNeighboringIntegration;
unsigned m_regionComputeDynamicRupture;
kernels::ReceiverCluster* m_receiverCluster;
/**
* Writes the receiver output if applicable (receivers present, receivers have to be written).
**/
void writeReceivers();
/**
* Computes the source terms if applicable.
**/
void computeSources();
/**
* Computes dynamic rupture.
**/
void computeDynamicRupture( seissol::initializers::Layer& layerData );
/**
* Computes all cell local integration.
*
* This are:
* * time integration
* * volume integration
* * local boundary integration
*
* Remark: After this step the DOFs are only updated half with the boundary contribution
* of the neighborings cells missing.
*
* @param i_numberOfCells number of cells.
* @param i_cellInformation cell local information.
* @param i_cellData cell data.
* @param io_buffers time integration buffers.
* @param io_derivatives time derivatives.
* @param io_dofs degrees of freedom.
**/
void computeLocalIntegration( seissol::initializers::Layer& i_layerData, bool resetBuffers);
/**
* Computes the contribution of the neighboring cells to the boundary integral.
*
* Remark: After this step (in combination with the local integration) the DOFs are at the next time step.
* TODO: This excludes dynamic rupture contribution.
*
* @param i_numberOfCells number of cells.
* @param i_cellInformation cell local information.
* @param i_cellData cell data.
* @param i_faceNeighbors pointers to neighboring time buffers or derivatives.
* @param io_dofs degrees of freedom.
**/
void computeNeighboringIntegration( seissol::initializers::Layer& i_layerData, double subTimeStart );
void computeLocalIntegrationFlops(seissol::initializers::Layer& layerData);
#ifndef ACL_DEVICE
/**
* Host-only implementation of the neighboring integration over one layer.
* Parallelized over cells; returns the (non-zero, hardware) plasticity flop
* counts accumulated during this sweep.  The plasticity kernel is compiled
* in or out via the template parameter.
**/
template<bool usePlasticity>
std::pair<long, long> computeNeighboringIntegrationImplementation(seissol::initializers::Layer& i_layerData,
double subTimeStart) {
SCOREP_USER_REGION( "computeNeighboringIntegration", SCOREP_USER_REGION_TYPE_FUNCTION )
m_loopStatistics->begin(m_regionComputeNeighboringIntegration);
real* (*faceNeighbors)[4] = i_layerData.var(m_lts->faceNeighbors);
CellDRMapping (*drMapping)[4] = i_layerData.var(m_lts->drMapping);
CellLocalInformation* cellInformation = i_layerData.var(m_lts->cellInformation);
PlasticityData* plasticity = i_layerData.var(m_lts->plasticity);
real (*pstrain)[7 * NUMBER_OF_ALIGNED_BASIS_FUNCTIONS] = i_layerData.var(m_lts->pstrain);
unsigned numberOTetsWithPlasticYielding = 0;
kernels::NeighborData::Loader loader;
loader.load(*m_lts, i_layerData);
real *l_timeIntegrated[4];
real *l_faceNeighbors_prefetch[4];
#ifdef _OPENMP
#pragma omp parallel for schedule(static) default(none) private(l_timeIntegrated, l_faceNeighbors_prefetch) shared(cellInformation, loader, faceNeighbors, pstrain, i_layerData, plasticity, drMapping, subTimeStart) reduction(+:numberOTetsWithPlasticYielding)
#endif
for( unsigned int l_cell = 0; l_cell < i_layerData.getNumberOfCells(); l_cell++ ) {
auto data = loader.entry(l_cell);
seissol::kernels::TimeCommon::computeIntegrals(m_timeKernel,
data.cellInformation.ltsSetup,
data.cellInformation.faceTypes,
subTimeStart,
timeStepSize(),
faceNeighbors[l_cell],
#ifdef _OPENMP
*reinterpret_cast<real (*)[4][tensor::I::size()]>(&(m_globalDataOnHost->integrationBufferLTS[omp_get_thread_num()*4*tensor::I::size()])),
#else
*reinterpret_cast<real (*)[4][tensor::I::size()]>(m_globalData->integrationBufferLTS),
#endif
l_timeIntegrated);
#ifdef ENABLE_MATRIX_PREFETCH
// prefetch targets: neighbor buffers (or DR godunov state) of faces 1-3
l_faceNeighbors_prefetch[0] = (cellInformation[l_cell].faceTypes[1] != FaceType::dynamicRupture) ?
faceNeighbors[l_cell][1] :
drMapping[l_cell][1].godunov;
l_faceNeighbors_prefetch[1] = (cellInformation[l_cell].faceTypes[2] != FaceType::dynamicRupture) ?
faceNeighbors[l_cell][2] :
drMapping[l_cell][2].godunov;
l_faceNeighbors_prefetch[2] = (cellInformation[l_cell].faceTypes[3] != FaceType::dynamicRupture) ?
faceNeighbors[l_cell][3] :
drMapping[l_cell][3].godunov;
// fourth face's prefetches
if (l_cell < (i_layerData.getNumberOfCells()-1) ) {
l_faceNeighbors_prefetch[3] = (cellInformation[l_cell+1].faceTypes[0] != FaceType::dynamicRupture) ?
faceNeighbors[l_cell+1][0] :
drMapping[l_cell+1][0].godunov;
} else {
l_faceNeighbors_prefetch[3] = faceNeighbors[l_cell][3];
}
#endif
m_neighborKernel.computeNeighborsIntegral( data,
drMapping[l_cell],
#ifdef ENABLE_MATRIX_PREFETCH
l_timeIntegrated, l_faceNeighbors_prefetch
#else
l_timeIntegrated
#endif
);
if constexpr (usePlasticity) {
updateRelaxTime();
numberOTetsWithPlasticYielding += seissol::kernels::Plasticity::computePlasticity( m_oneMinusIntegratingFactor,
timeStepSize(),
m_tv,
m_globalDataOnHost,
&plasticity[l_cell],
data.dofs,
pstrain[l_cell] );
}
#ifdef INTEGRATE_QUANTITIES
seissol::SeisSol::main.postProcessor().integrateQuantities( m_timeStepWidth,
i_layerData,
l_cell,
dofs[l_cell] );
#endif // INTEGRATE_QUANTITIES
}
const long long nonZeroFlopsPlasticity =
i_layerData.getNumberOfCells() * m_flops_nonZero[static_cast<int>(ComputePart::PlasticityCheck)] +
numberOTetsWithPlasticYielding * m_flops_nonZero[static_cast<int>(ComputePart::PlasticityYield)];
const long long hardwareFlopsPlasticity =
i_layerData.getNumberOfCells() * m_flops_hardware[static_cast<int>(ComputePart::PlasticityCheck)] +
numberOTetsWithPlasticYielding * m_flops_hardware[static_cast<int>(ComputePart::PlasticityYield)];
m_loopStatistics->end(m_regionComputeNeighboringIntegration, i_layerData.getNumberOfCells(), m_globalClusterId);
return {nonZeroFlopsPlasticity, hardwareFlopsPlasticity};
}
#endif // ACL_DEVICE
void computeLocalIntegrationFlops(unsigned numberOfCells,
CellLocalInformation const* cellInformation,
long long& nonZeroFlops,
long long& hardwareFlops);
void computeNeighborIntegrationFlops(seissol::initializers::Layer &layerData);
void computeDynamicRuptureFlops(seissol::initializers::Layer &layerData,
long long& nonZeroFlops,
long long& hardwareFlops);
void computeFlops();
//! Update relax time for plasticity
void updateRelaxTime() {
m_oneMinusIntegratingFactor = (m_tv > 0.0) ? 1.0 - exp(-timeStepSize() / m_tv) : 1.0;
}
const LayerType layerType;
//! time of the next receiver output
double m_receiverTime;
//! print status every 100th timestep
bool printProgress;
//! cluster id on this rank
const unsigned int m_clusterId;
//! global cluster cluster id
const unsigned int m_globalClusterId;
DynamicRuptureScheduler* dynamicRuptureScheduler;
void printTimeoutMessage(std::chrono::seconds timeSinceLastUpdate) override;
public:
ActResult act() override;
/**
* Constructs a new LTS cluster.
*
* @param i_clusterId id of this cluster with respect to the current rank.
* @param i_globalClusterId global id of this cluster.
* @param usePlasticity true if using plasticity
* @param i_timeKernel time integration kernel.
* @param i_volumeKernel volume integration kernel.
* @param i_boundaryKernel boundary integration kernel.
* @param i_copyCellInformation cell information in the copy layer.
* @param i_interiorCellInformation cell information in the interior.
* @param i_globalData global data.
* @param i_copyCellData cell data in the copy layer.
* @param i_interiorCellData cell data in the interior.
* @param i_cells degrees of freedom, time buffers, time derivatives.
**/
TimeCluster(unsigned int i_clusterId, unsigned int i_globalClusterId, bool usePlasticity,
LayerType layerType, double maxTimeStepSize,
long timeStepRate, bool printProgress,
DynamicRuptureScheduler* dynamicRuptureScheduler, CompoundGlobalData i_globalData,
seissol::initializers::Layer *i_clusterData, seissol::initializers::Layer* dynRupInteriorData,
seissol::initializers::Layer* dynRupCopyData, seissol::initializers::LTS* i_lts,
seissol::initializers::DynamicRupture* i_dynRup, LoopStatistics* i_loopStatistics,
ActorStateStatistics* actorStateStatistics);
/**
* Destructor of a LTS cluster.
* TODO: Currently prints only statistics in debug mode.
**/
~TimeCluster() override;
/**
* Sets the pointer to the cluster's point sources
*
* @param i_cellToPointSources Contains mappings of 1 cell offset to m point sources
* @param i_numberOfCellToPointSourcesMappings Size of i_cellToPointSources
* @param i_pointSources pointer to all point sources used on this cluster
*/
void setPointSources( sourceterm::CellToPointSourcesMapping const* i_cellToPointSources,
unsigned i_numberOfCellToPointSourcesMappings,
sourceterm::PointSources const* i_pointSources );
void setReceiverCluster( kernels::ReceiverCluster* receiverCluster) {
m_receiverCluster = receiverCluster;
}
/**
* Set Tv constant for plasticity.
*/
void setTv(double tv) {
m_tv = tv;
updateRelaxTime();
}
void reset() override;
[[nodiscard]] unsigned int getClusterId() const;
[[nodiscard]] unsigned int getGlobalClusterId() const;
[[nodiscard]] LayerType getLayerType() const;
void setReceiverTime(double receiverTime);
};
#endif
|
sum.c | /*
* Assignment 2 (CSE436)
* Kazumi Malhan
* 06/08/2016
*/
#include <math.h>
#include <omp.h>      /* omp_get_thread_num(); was called without a declaration */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/timeb.h>
/* read timer in second */
/* Wall-clock time in seconds (whole seconds plus millisecond fraction). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    return (double) now.time + (double) now.millitm / 1000.0;
}
/* read timer in ms */
/* Wall-clock time in milliseconds. */
double read_timer_ms() {
    struct timeb now;
    ftime(&now);
    return 1000.0 * (double) now.time + (double) now.millitm;
}
#define REAL float
#define VECTOR_LENGTH 102400
/* initialize a vector with random floating point numbers */
/* Fill A[0..N-1] with pseudo-random values in [0, 1) drawn from drand48()
   (the caller seeds the generator via srand48()). */
void init(REAL *A, int N) {
    int idx;
    for (idx = 0; idx < N; idx++) {
        A[idx] = (double) drand48();
    }
}
/* Function Prototypes */
REAL sum (int N, REAL *A);
REAL sum_omp_parallel (int N, REAL *A, int num_tasks);
REAL sum_omp_parallel_for (int N, REAL *A, int num_tasks);
/*
* To compile: gcc sum.c -fopenmp -o sum
* To run: ./sum N num_tasks
*/
/*
 * Driver: parses N and num_tasks from the command line (falling back to the
 * defaults when fewer than two arguments are given), fills a random vector,
 * times the serial and the two OpenMP sum implementations, and prints a
 * runtime/MFLOPS table.
 */
int main(int argc, char *argv[]) {
int N = VECTOR_LENGTH;
int num_tasks = 4;
double elapsed_serial, elapsed_para, elapsed_para_for; /* for timing */
if (argc < 3) {
fprintf(stderr, "Usage: sum [<N(%d)>] [<#tasks(%d)>]\n", N,num_tasks);
fprintf(stderr, "\t Example: ./sum %d %d\n", N,num_tasks);
} else {
N = atoi(argv[1]);
num_tasks = atoi(argv[2]);
}
/* NOTE(review): malloc result is not checked; a huge N would crash in init */
REAL *A = (REAL*)malloc(sizeof(REAL)*N);
/* fixed seed so every run sums the same data */
srand48((1 << 12));
init(A, N);
/* Serial Run */
elapsed_serial = read_timer();
REAL result = sum(N, A);
elapsed_serial = (read_timer() - elapsed_serial);
/* Parallel Run */
/* NOTE(review): each call overwrites 'result'; the implementations' results
   are timed but never compared against the serial answer */
elapsed_para = read_timer();
result = sum_omp_parallel(N, A, num_tasks);
elapsed_para = (read_timer() - elapsed_para);
/* Parallel For Run */
elapsed_para_for = read_timer();
result = sum_omp_parallel_for(N, A, num_tasks);
elapsed_para_for = (read_timer() - elapsed_para_for);
/* you should add the call to each function and time the execution */
printf("======================================================================================================\n");
printf("\tSum %d numbers with %d tasks\n", N, num_tasks);
printf("------------------------------------------------------------------------------------------------------\n");
printf("Performance:\t\tRuntime (ms)\t MFLOPS \n");
printf("------------------------------------------------------------------------------------------------------\n");
printf("Sum Serial:\t\t\t%4f\t%4f\n", elapsed_serial * 1.0e3, 2*N / (1.0e6 * elapsed_serial));
printf("Sum Parallel:\t\t\t%4f\t%4f\n", elapsed_para * 1.0e3, (2*N+num_tasks) / (1.0e6 * elapsed_para));
printf("Sum Parallel For:\t\t%4f\t%4f\n", elapsed_para_for * 1.0e3, 2*N / (1.0e6 * elapsed_para_for));
free(A);
return 0;
}
/* Serial Implemenration */
/* Serial reduction: returns the sum of A[0..N-1]. */
REAL sum(int N, REAL *A) {
    REAL total = 0.0;
    int idx;
    for (idx = 0; idx < N; idx++) {
        total += A[idx];
    }
    return total;
}
/* Parallel Implemenration */
/* Parallel reduction using an explicit omp parallel region.
 * Each of num_tasks threads sums its contiguous chunk of A (plus at most one
 * tail element when N is not divisible by num_tasks), stores the chunk sum in
 * partial_result[tid], and the partials are combined serially after the
 * region ends.
 *
 * Fixes over the original version:
 *  - partial_result was declared int[], silently truncating the REAL (float)
 *    partial sums to integers; it is now REAL[].
 *  - temp was read before being initialized (undefined behavior); it now
 *    starts at 0.0.
 */
REAL sum_omp_parallel (int N, REAL *A, int num_tasks) {
    REAL result = 0.0;
    REAL partial_result[num_tasks]; /* one slot per thread (VLA) */
    int t;
    /* Determine if task can be evenly distrubutable */
    int each_task = N / num_tasks;
    int leftover = N - (each_task * num_tasks);
#pragma omp parallel shared (N, A, num_tasks, leftover, partial_result) num_threads(num_tasks)
    {
        int i, tid, istart, iend;
        REAL temp = 0.0; /* per-thread accumulator */
        tid = omp_get_thread_num();
        istart = tid * (N / num_tasks);
        iend = (tid + 1) * (N / num_tasks);
        for (i = istart; i < iend; ++i) {
            temp += A[i];
        }
        /* Tail handling: the first 'leftover' threads each pick up one of the
           trailing elements (indices N-1 down to N-leftover), which together
           with the chunks above covers all of A[0..N-1] exactly once. */
        if (tid < leftover) {
            temp += A[N - tid - 1];
        }
        partial_result[tid] = temp;
    } /* end of parallel */
    /* Combine the per-thread partial sums */
    for (t = 0; t < num_tasks; t++)
        result += partial_result[t];
    return result;
}
/* Parallel For Implemenration */
/* Parallel reduction via an omp worksharing for loop with a + reduction.
 * schedule(runtime) lets OMP_SCHEDULE pick the chunking.  The nowait clause
 * is safe here: the reduced value of 'result' is only read after the implicit
 * barrier at the end of the parallel region. */
REAL sum_omp_parallel_for (int N, REAL *A, int num_tasks) {
int i;
REAL result = 0.0;
# pragma omp parallel shared (N, A, result) private (i) num_threads(num_tasks)
{
# pragma omp for schedule(runtime) reduction (+:result) nowait
for (i = 0; i < N; ++i) {
result += A[i];
}
} // end of parallel
return result;
}
|
pr78899.c | /* PR tree-optimization/78899 */
/* { dg-do compile } */
/* { dg-options "-Ofast -fopenmp-simd -mavx2 -mno-avx512f" } */
#define N 1024
#define M 4
int p1[N], p2[N], p3[N], c[N];
/* Vectorization kernel: in M-wide simd chunks, conditionally (under mask
   c[]) increments p1[] and stores p3[]+2 into p2[].  The dg-final directives
   below require this loop to vectorize with AVX2 masked moves (vpmaskmov),
   so the loop body must stay as written. */
void
foo (int n)
{
int i, k;
for (k = 0; k < n / M; k++)
{
#pragma omp simd
for (i = 0; i < M; i++)
if (c[k * M + i])
{
p1[k * M + i] += 1;
p2[k * M + i] = p3[k * M + i] + 2;
}
}
}
/* Ensure the loop is vectorized. */
/* { dg-final { scan-assembler "vpmaskmov" } } */
/* { dg-final { scan-assembler "vpadd" } } */
|
pointer2Array2.c | // array types from a parameter list have to be converted to corresponding pointer types
// to avoid segmentation fault.
// Kernel is extracted from cg of npb2.3 omp c benchmarks.
static int colidx[100][100];
/* Zero the 100x100 matrix passed via an array-typed parameter (which decays
 * to int (*)[100]); the outer loop is parallelized with OpenMP.
 *
 * Fix: the original loops ran i, j = 1..100 inclusive, so every iteration
 * touching index 100 wrote one element past the bounds of an int[100][100]
 * (undefined behavior).  The loops now cover the valid range 0..99, zeroing
 * the whole matrix.
 */
static void makea (int colidx[100][100])
{
    int i, j;
#pragma omp parallel for private(i,j)
    for (i = 0; i < 100; i++)
        for (j = 0; j < 100; j++)
            colidx[i][j] = 0;
}
/* Driver: passes the file-scope matrix through the array-typed parameter
   (implicit return 0 is valid for main since C99). */
int main()
{
makea(colidx);
}
|
trsm_x_coo_n_hi_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
/* Triangular solve with multiple right-hand sides for an upper-triangular
 * (non-unit diagonal) sparse matrix stored in unsorted COO format:
 * computes y such that A * y = alpha * x, column by column, by backward
 * substitution (rows processed from m-1 down to 0).
 *
 * x and y are dense, column-major-ish with leading dimensions ldx/ldy;
 * 'columns' right-hand sides are solved independently (parallelized over
 * columns — safe, since each column's substitution only reads y entries of
 * that same column computed earlier in the sequential r loop).
 *
 * NOTE(review): the inner scan over all nnz entries per row makes this
 * O(columns * m * nnz); fine for a reference kernel, slow for large inputs.
 * NOTE(review): diag[m] is a VLA on the stack — large m may overflow it.
 * NOTE(review): rows with no stored diagonal entry divide by zero (diag
 * stays value-zero from the memset).
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT m = A->rows;
ALPHA_Number diag[m];
memset(diag, '\0', m * sizeof(ALPHA_Number));
int num_thread = alpha_get_thread_num();
/* gather the diagonal: entries with row == col */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT r = 0; r < A->nnz; r++)
{
if(A->row_indx[r] == A->col_indx[r])
{
diag[A->row_indx[r]] = A->values[r];
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
/* backward substitution: y[r] = (alpha*x[r] - sum_{col>r} A[r,col]*y[col]) / diag[r] */
for (ALPHA_INT r = m - 1; r >= 0; r--)
{
ALPHA_Number temp ;
alpha_setzero(temp);
for (ALPHA_INT cr = A->nnz - 1; cr >= 0; cr--)
{
int row = A->row_indx[cr];
int col = A->col_indx[cr];
if(row == r && col > r)
alpha_madde(temp, A->values[cr], y[col * ldy + out_y_col]);
}
ALPHA_Number t;
alpha_mul(t, alpha, x[r * ldx + out_y_col]);
alpha_sub(t, t, temp);
alpha_div(y[r * ldy + out_y_col], t, diag[r]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
fdtd-2d.pluto.par.l2tile.c |
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define tmax T
#define nx N
#define ny N
double ex[nx][ny +1];
double ey[nx +1][ny];
double hz[nx][ny];
/* Zero-fill the field arrays (ey, ex, hz) and then apply the boundary
   condition: a linear ramp along row 0 of ey. */
void init_arrays()
{
    int row, col;
    /* ey is (nx+1) x ny */
    for (row = 0; row < nx + 1; row++) {
        for (col = 0; col < ny; col++) {
            ey[row][col] = 0;
        }
    }
    /* ex is nx x (ny+1) */
    for (row = 0; row < nx; row++) {
        for (col = 0; col < ny + 1; col++) {
            ex[row][col] = 0;
        }
    }
    /* boundary condition must come after the ey clear above */
    for (col = 0; col < ny; col++) {
        ey[0][col] = ((double)col)/ny;
    }
    /* hz is nx x ny */
    for (row = 0; row < nx; row++) {
        for (col = 0; col < ny; col++) {
            hz[row][col] = 0;
        }
    }
}
/* Wall-clock time in seconds with microsecond resolution.
 *
 * Fixes over the original version: the unused local 'stat' is removed, and
 * NULL is passed as gettimeofday's obsolete timezone argument instead of an
 * uninitialized struct timezone. */
double rtclock()
{
    struct timeval tp;
    gettimeofday (&tp, NULL);
    return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
/* Timing driver for the Pluto-generated, OpenMP-parallel, L2-tiled FDTD-2D
   kernel.  Runs the kernel REPS times, reports the mean wall-clock seconds
   per run (or, when TEST is defined, dumps the hz field for validation).
   REPS, T and N are expected from the compiler command line. */
int main()
{
init_arrays();
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int annot_i;
for (annot_i=0; annot_i<REPS; annot_i++)
{
annot_t_start = rtclock();
/* The kernel below is auto-generated (Pluto tiling + Orio annotation
   expansion); its includes and helper macros are textually embedded inside
   main's body by the generator.  Do not hand-edit the bound expressions. */
#include <math.h>
#include <assert.h>
#include <omp.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
int c1, c2, c3, c4, c5, c6, c7, c8, c9;
register int lb, ub, lb1, ub1, lb2, ub2;
register int lbv, ubv;
/* Outer wavefront over time/space tiles; iterations of the c2 loop are
   independent and are distributed across threads by the pragma below.
   All loop bounds are machine-generated polyhedral expressions. */
for (c1=-1;c1<=floord(2*tmax+ny-2,256);c1++) {
lb1=max(max(ceild(256*c1-tmax+1,256),ceild(256*c1-255,512)),0);
ub1=min(min(floord(tmax+ny-1,256),floord(256*c1+255,256)),floord(256*c1+ny+255,512));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9)
for (c2=lb1; c2<=ub1; c2++) {
for (c3=max(max(max(max(max(max(max(max(max(max(0,ceild(256*c1-256*c2-255*ny-64515,65280)),ceild(256*c2-ny-254,256)),ceild(512*c1-768*c2-509,256)),ceild(256*c1-256*c2-255,256)),ceild(256*c1-256*c2-ny-253,256)),ceild(256*c1-65536*c2-129795,65280)),ceild(65280*c1-130304*c2-254*nx-ny-64515,256)),ceild(65536*c1-130816*c2-254*nx-64771,256)),ceild(256*c1-65280*c2-254*nx-ny-64515,256)),ceild(256*c1-65536*c2-254*nx-64771,256));c3<=min(min(min(floord(256*c2+nx+254,256),floord(tmax+nx-1,256)),floord(256*c1-256*c2+nx+255,256)),c1+255*c2+nx+254);c3++) {
for (c4=max(max(max(max(max(max(max(max(8*c1-8*c2,0),ceild(256*c3-nx-31,32)),ceild(256*c1-57600*c2-256*c3-223*nx-56928,32)),ceild(256*c2-ny-31,32)),ceild(256*c1-57344*c2-256*c3-223*nx-ny-56672,32)),8*c1-8*c2-1792*c3-7*ny-1771),8*c1-1800*c2-1792*c3-3563),-256*c2+8*c3-nx-254);c4<=min(min(min(min(min(min(min(min(min(min(min(min(min(min(min(floord(256*c2+255,32),floord(tmax-1,32)),8*c1-8*c2+7),floord(7936*c2+7936*c3+15779,32)),floord(256*c2+256*c3+509,64)),floord(7936*c3+31*ny+7843,32)),floord(256*c3+255,32)),floord(256*c3+ny+253,32)),floord(256*c1+57344*c2-256*c3+225*nx+57150,7200)),floord(256*c1+57344*c2-256*c3+225*nx+57150,32)),floord(7680*c2+256*c3+30*nx+ny+7843,992)),floord(7680*c2+256*c3+30*nx+ny+7843,32)),floord(7936*c2+256*c3+30*nx+8099,1024)),floord(7936*c2+256*c3+30*nx+8099,32)),floord(-256*c1+65280*c2+256*c3+254*nx+ny+64515,8128)),floord(-256*c1+65536*c2+256*c3+254*nx+64771,8160));c4++) {
for (c5=max(max(max(max(max(max(max(max(max(max(max(max(max(max(max(max(max(max(max(max(8*c2,ceild(-256*c1+256*c2+256*c3+32*c4-225*nx-6750,7200)),ceild(-256*c1+256*c2+256*c3+7200*c4-225*nx-6750,7200)),ceild(256*c1-256*c2-256*c3+8128*c4-254*nx-ny-7619,8128)),ceild(256*c1-256*c2-256*c3-254*nx-7651,8160)),ceild(-c1+c2+c3-nx-30,32)),ceild(256*c1-256*c2-256*c3-254*nx-ny-7619,8128)),0),ceild(32*c4-31,32)),ceild(256*c1-256*c2-256*c3+8160*c4-254*nx-7651,8160)),ceild(256*c3-nx-30,32)),ceild(8*c3-c4-nx-30,32)),ceild(-256*c3+32*c4-30*nx-ny-1123,960)),ceild(-256*c3+992*c4-30*nx-ny-1123,960)),ceild(-256*c3+64*c4-285,32)),ceild(-7936*c3+32*c4-8835,992)),ceild(256*c1-256*c2-256*c3-32*c4-223*nx-ny-6720,7136)),ceild(-256*c3+1024*c4-30*nx-1155,992)),ceild(256*c1-256*c2-256*c3-32*c4-223*nx-6752,7168)),ceild(8*c1-8*c2-1792*c3-c4-1995,224)),ceild(-256*c3+32*c4-30*nx-1155,992));c5<=min(min(min(floord(tmax+ny-1,32),8*c2+7),floord(256*c3+ny+254,32)),floord(32*c4+ny+31,32));c5++) {
for (c6=max(max(max(max(max(max(max(max(max(max(max(max(max(max(max(max(max(ceild(32*c4-960*c5-30*nx-ny-899,32),ceild(992*c4-960*c5-30*nx-ny-899,32)),ceild(32*c4-992*c5-30*nx-931,32)),ceild(1024*c4-992*c5-30*nx-931,32)),-8*c1+8*c2+8*c3+c4-224*c5-7*nx-210),-8*c1+8*c2+8*c3+225*c4-224*c5-7*nx-210),ceild(32*c4-31,32)),ceild(32*c4-ny-29,32)),0),ceild(8*c1-8*c2-8*c3-c4-224*c5-434,223)),ceild(-8*c1+8*c2+8*c3+c4-224*c5-434,225)),ceild(32*c5-ny-30,32)),ceild(32*c4-31*ny-899,992)),ceild(-8*c1+8*c2+8*c3+c4-7*ny-210,225)),ceild(8*c1-8*c2-8*c3-c4-7*ny-210,223)),ceild(64*c4-32*c5-61,32)),ceild(32*c4-992*c5-1891,992)),8*c3);c6<=min(min(min(min(min(min(8*c3+7,-8*c1+8*c2+8*c3+c4+224*c5+7*nx+210),floord(32*c5+nx+30,32)),c4+32*c5+nx+30),-8*c1+8*c2+8*c3-223*c4+224*c5+7*nx+210),floord(tmax+nx-1,32)),floord(32*c4+nx+31,32));c6++) {
/* The guarded bodies below are specialized copies of the three FDTD update
   statements (ey, ex, hz) for tile-boundary cases (first row/column,
   nx==1, ny==1, etc.), produced by the code generator. */
if ((c4 <= floord(32*c6-nx,32)) && (c5 <= floord(32*c6-nx+ny,32)) && (c6 >= ceild(nx,32))) {
for (c8=max(32*c6-nx+1,32*c5);c8<=min(32*c6-nx+ny,32*c5+31);c8++) {
{hz[nx-1][-32*c6+c8+nx-1]=hz[nx-1][-32*c6+c8+nx-1]-((double)(7))/10*(ey[1+nx-1][-32*c6+c8+nx-1]+ex[nx-1][1+-32*c6+c8+nx-1]-ex[nx-1][-32*c6+c8+nx-1]-ey[nx-1][-32*c6+c8+nx-1]);} ;
}
}
if ((c4 <= floord(32*c5-ny,32)) && (c5 >= max(ceild(ny,32),ceild(32*c6-nx+ny+1,32)))) {
for (c9=max(32*c5-ny+1,32*c6);c9<=min(32*c6+31,32*c5+nx-ny);c9++) {
{hz[-32*c5+c9+ny-1][ny-1]=hz[-32*c5+c9+ny-1][ny-1]-((double)(7))/10*(ey[1+-32*c5+c9+ny-1][ny-1]+ex[-32*c5+c9+ny-1][1+ny-1]-ex[-32*c5+c9+ny-1][ny-1]-ey[-32*c5+c9+ny-1][ny-1]);} ;
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx >= 2)) {
for (c7=max(max(0,32*c6),32*c5-ny+1);c7<=min(min(min(32*c5-1,32*c6-nx+31),tmax-1),32*c5-ny+31);c7++) {
for (c8=32*c5;c8<=c7+ny-1;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
for (c9=c7+1;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
{hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]);} ;
}
for (c9=c7+1;c9<=c7+nx;c9++) {
{hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx >= 2) && (ny >= 2)) {
for (c7=max(max(32*c5,0),32*c6);c7<=min(min(32*c6-nx+31,tmax-1),32*c5-ny+31);c7++) {
{ey[0][0]=c7;} ;
for (c9=c7+1;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c8=c7+1;c8<=c7+ny-1;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
for (c9=c7+1;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
{hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]);} ;
}
for (c9=c7+1;c9<=c7+nx;c9++) {
{hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx >= 2)) {
for (c7=max(max(0,32*c6),32*c5-ny+32);c7<=min(min(tmax-1,32*c5-1),32*c6-nx+31);c7++) {
for (c8=32*c5;c8<=32*c5+31;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
for (c9=c7+1;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
{hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6)) {
for (c7=max(max(max(32*c6-nx+32,0),32*c6),32*c5-ny+1);c7<=min(min(min(32*c5-1,tmax-1),32*c6+30),32*c5-ny+31);c7++) {
for (c8=32*c5;c8<=c7+ny-1;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
}
for (c9=c7+1;c9<=32*c6+31;c9++) {
{hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx >= 2)) {
for (c7=max(max(max(32*c5,0),32*c6),32*c5-ny+32);c7<=min(min(tmax-1,32*c5+30),32*c6-nx+31);c7++) {
{ey[0][0]=c7;} ;
for (c9=c7+1;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c8=c7+1;c8<=32*c5+31;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
for (c9=c7+1;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
{hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (ny >= 2)) {
for (c7=max(max(max(32*c5,32*c6-nx+32),0),32*c6);c7<=min(min(tmax-1,32*c6+30),32*c5-ny+31);c7++) {
{ey[0][0]=c7;} ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c8=c7+1;c8<=c7+ny-1;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
}
for (c9=c7+1;c9<=32*c6+31;c9++) {
{hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6)) {
for (c7=max(max(max(0,32*c6),32*c6-nx+32),32*c5-ny+32);c7<=min(min(tmax-1,32*c6+30),32*c5-1);c7++) {
for (c8=32*c5;c8<=32*c5+31;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
}
}
}
if ((c1 == c2+c3) && (c4 == c6)) {
for (c7=max(max(max(max(32*c5,32*c6-nx+32),0),32*c6),32*c5-ny+32);c7<=min(min(tmax-1,32*c5+30),32*c6+30);c7++) {
{ey[0][0]=c7;} ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c8=c7+1;c8<=32*c5+31;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
for (c9=c7+1;c9<=32*c6+31;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx >= 2) && (ny == 1)) {
for (c7=max(max(0,32*c6),32*c5);c7<=min(min(tmax-1,32*c6+30),32*c5+30);c7++) {
{ey[0][0]=c7;} ;
for (c9=c7+1;c9<=min(c7+nx-1,32*c6+31);c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c9=c7+1;c9<=min(c7+nx,32*c6+31);c9++) {
{hz[-c7+c9-1][0]=hz[-c7+c9-1][0]-((double)(7))/10*(ey[1+-c7+c9-1][0]+ex[-c7+c9-1][1+0]-ex[-c7+c9-1][0]-ey[-c7+c9-1][0]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx == 1)) {
for (c7=max(max(0,32*c6),32*c5-ny+1);c7<=min(min(min(32*c5-1,tmax-1),32*c6+30),32*c5-ny+31);c7++) {
for (c8=32*c5;c8<=c7+ny-1;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
{hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]);} ;
}
{hz[0][ny-1]=hz[0][ny-1]-((double)(7))/10*(ey[1+0][ny-1]+ex[0][1+ny-1]-ex[0][ny-1]-ey[0][ny-1]);} ;
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx == 1) && (ny >= 2)) {
for (c7=max(max(32*c5,0),32*c6);c7<=min(min(tmax-1,32*c6+30),32*c5-ny+31);c7++) {
{ey[0][0]=c7;} ;
for (c8=c7+1;c8<=c7+ny-1;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
{hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]);} ;
}
{hz[0][ny-1]=hz[0][ny-1]-((double)(7))/10*(ey[1+0][ny-1]+ex[0][1+ny-1]-ex[0][ny-1]-ey[0][ny-1]);} ;
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx == 1)) {
for (c7=max(max(0,32*c6),32*c5-ny+32);c7<=min(min(tmax-1,32*c6+30),32*c5-1);c7++) {
for (c8=32*c5;c8<=32*c5+31;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
{hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx == 1)) {
for (c7=max(max(max(32*c5,0),32*c6),32*c5-ny+32);c7<=min(min(tmax-1,32*c5+30),32*c6+30);c7++) {
{ey[0][0]=c7;} ;
for (c8=c7+1;c8<=32*c5+31;c8++) {
{ey[0][-c7+c8]=c7;} ;
{ex[0][-c7+c8]=ex[0][-c7+c8]-((double)(1))/2*(hz[0][-c7+c8]-hz[0][-c7+c8-1]);} ;
{hz[0][-c7+c8-1]=hz[0][-c7+c8-1]-((double)(7))/10*(ey[1+0][-c7+c8-1]+ex[0][1+-c7+c8-1]-ex[0][-c7+c8-1]-ey[0][-c7+c8-1]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (nx == 1) && (ny == 1)) {
for (c7=max(max(0,32*c6),32*c5);c7<=min(min(tmax-1,32*c5+30),32*c6+30);c7++) {
{ey[0][0]=c7;} ;
{hz[0][0]=hz[0][0]-((double)(7))/10*(ey[1+0][0]+ex[0][1+0]-ex[0][0]-ey[0][0]);} ;
}
}
for (c7=max(max(max(32*c6-nx+1,0),32*c5-ny+1),32*c4);c7<=min(min(min(min(min(32*c5-1,32*c6-nx+31),32*c6-1),tmax-1),32*c4+31),32*c5-ny+31);c7++) {
for (c8=32*c5;c8<=c7+ny-1;c8++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
{hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]);} ;
}
for (c9=32*c6;c9<=c7+nx;c9++) {
{hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]);} ;
}
}
if (ny >= 2) {
for (c7=max(max(max(32*c5,32*c6-nx+1),0),32*c4);c7<=min(min(min(min(32*c6-nx+31,32*c6-1),tmax-1),32*c4+31),32*c5-ny+31);c7++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c8=c7+1;c8<=c7+ny-1;c8++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
{hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]);} ;
}
for (c9=32*c6;c9<=c7+nx;c9++) {
{hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]);} ;
}
}
}
for (c7=max(max(max(0,32*c6-nx+1),32*c4),32*c5-ny+32);c7<=min(min(min(min(32*c6-1,tmax-1),32*c4+31),32*c5-1),32*c6-nx+31);c7++) {
for (c8=32*c5;c8<=32*c5+31;c8++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
{hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]);} ;
}
}
for (c7=max(max(max(32*c6-nx+32,0),32*c5-ny+1),32*c4);c7<=min(min(min(min(32*c5-1,32*c6-1),tmax-1),32*c4+31),32*c5-ny+31);c7++) {
for (c8=32*c5;c8<=c7+ny-1;c8++) {
for (c9=32*c6;c9<=32*c6+31;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
}
for (c9=32*c6;c9<=32*c6+31;c9++) {
{hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]);} ;
}
}
for (c7=max(max(max(max(32*c5,0),32*c6-nx+1),32*c4),32*c5-ny+32);c7<=min(min(min(min(32*c6-1,tmax-1),32*c4+31),32*c5+30),32*c6-nx+31);c7++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c8=c7+1;c8<=32*c5+31;c8++) {
for (c9=32*c6;c9<=c7+nx-1;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
{hz[nx-1][-c7+c8-1]=hz[nx-1][-c7+c8-1]-((double)(7))/10*(ey[1+nx-1][-c7+c8-1]+ex[nx-1][1+-c7+c8-1]-ex[nx-1][-c7+c8-1]-ey[nx-1][-c7+c8-1]);} ;
}
}
if (ny >= 2) {
for (c7=max(max(max(32*c5,32*c6-nx+32),0),32*c4);c7<=min(min(min(32*c6-1,tmax-1),32*c4+31),32*c5-ny+31);c7++) {
for (c9=32*c6;c9<=32*c6+31;c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c8=c7+1;c8<=c7+ny-1;c8++) {
for (c9=32*c6;c9<=32*c6+31;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
}
for (c9=32*c6;c9<=32*c6+31;c9++) {
{hz[-c7+c9-1][ny-1]=hz[-c7+c9-1][ny-1]-((double)(7))/10*(ey[1+-c7+c9-1][ny-1]+ex[-c7+c9-1][1+ny-1]-ex[-c7+c9-1][ny-1]-ey[-c7+c9-1][ny-1]);} ;
}
}
}
for (c7=max(max(max(0,32*c4),32*c6-nx+32),32*c5-ny+32);c7<=min(min(min(32*c6-1,tmax-1),32*c4+31),32*c5-1);c7++) {
/* Orio annotation: the steady-state interior tile, unroll-and-jammed 4x4.
   The expanded code (with its cleanup loops) follows the annotation. */
/*@ begin Loop(
transform UnrollJam(ufactor=4)
for (c8=32*c5;c8<=32*c5+31;c8++) {
transform UnrollJam(ufactor=4)
for (c9=32*c6;c9<=32*c6+31;c9++) {
ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);
ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);
hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);
}
}
)@*/ {
for (c8=32*c5; c8<=32*c5+28; c8=c8+4) {
for (c9=32*c6; c9<=32*c6+28; c9=c9+4) {
ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);
ey[-c7+c9][-c7+c8+1]=ey[-c7+c9][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+1]-hz[-c7+c9-1][-c7+c8+1]);
ey[-c7+c9][-c7+c8+2]=ey[-c7+c9][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+2]-hz[-c7+c9-1][-c7+c8+2]);
ey[-c7+c9][-c7+c8+3]=ey[-c7+c9][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+3]-hz[-c7+c9-1][-c7+c8+3]);
ey[-c7+c9+1][-c7+c8]=ey[-c7+c9+1][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8]-hz[-c7+c9][-c7+c8]);
ey[-c7+c9+1][-c7+c8+1]=ey[-c7+c9+1][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8+1]-hz[-c7+c9][-c7+c8+1]);
ey[-c7+c9+1][-c7+c8+2]=ey[-c7+c9+1][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8+2]-hz[-c7+c9][-c7+c8+2]);
ey[-c7+c9+1][-c7+c8+3]=ey[-c7+c9+1][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8+3]-hz[-c7+c9][-c7+c8+3]);
ey[-c7+c9+2][-c7+c8]=ey[-c7+c9+2][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8]-hz[-c7+c9+1][-c7+c8]);
ey[-c7+c9+2][-c7+c8+1]=ey[-c7+c9+2][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8+1]-hz[-c7+c9+1][-c7+c8+1]);
ey[-c7+c9+2][-c7+c8+2]=ey[-c7+c9+2][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8+2]-hz[-c7+c9+1][-c7+c8+2]);
ey[-c7+c9+2][-c7+c8+3]=ey[-c7+c9+2][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8+3]-hz[-c7+c9+1][-c7+c8+3]);
ey[-c7+c9+3][-c7+c8]=ey[-c7+c9+3][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8]-hz[-c7+c9+2][-c7+c8]);
ey[-c7+c9+3][-c7+c8+1]=ey[-c7+c9+3][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8+1]-hz[-c7+c9+2][-c7+c8+1]);
ey[-c7+c9+3][-c7+c8+2]=ey[-c7+c9+3][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8+2]-hz[-c7+c9+2][-c7+c8+2]);
ey[-c7+c9+3][-c7+c8+3]=ey[-c7+c9+3][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8+3]-hz[-c7+c9+2][-c7+c8+3]);
ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);
ex[-c7+c9][-c7+c8+1]=ex[-c7+c9][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+1]-hz[-c7+c9][-c7+c8]);
ex[-c7+c9][-c7+c8+2]=ex[-c7+c9][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+2]-hz[-c7+c9][-c7+c8+1]);
ex[-c7+c9][-c7+c8+3]=ex[-c7+c9][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+3]-hz[-c7+c9][-c7+c8+2]);
ex[-c7+c9+1][-c7+c8]=ex[-c7+c9+1][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8]-hz[-c7+c9+1][-c7+c8-1]);
ex[-c7+c9+1][-c7+c8+1]=ex[-c7+c9+1][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8+1]-hz[-c7+c9+1][-c7+c8]);
ex[-c7+c9+1][-c7+c8+2]=ex[-c7+c9+1][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8+2]-hz[-c7+c9+1][-c7+c8+1]);
ex[-c7+c9+1][-c7+c8+3]=ex[-c7+c9+1][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8+3]-hz[-c7+c9+1][-c7+c8+2]);
ex[-c7+c9+2][-c7+c8]=ex[-c7+c9+2][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8]-hz[-c7+c9+2][-c7+c8-1]);
ex[-c7+c9+2][-c7+c8+1]=ex[-c7+c9+2][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8+1]-hz[-c7+c9+2][-c7+c8]);
ex[-c7+c9+2][-c7+c8+2]=ex[-c7+c9+2][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8+2]-hz[-c7+c9+2][-c7+c8+1]);
ex[-c7+c9+2][-c7+c8+3]=ex[-c7+c9+2][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8+3]-hz[-c7+c9+2][-c7+c8+2]);
ex[-c7+c9+3][-c7+c8]=ex[-c7+c9+3][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8]-hz[-c7+c9+3][-c7+c8-1]);
ex[-c7+c9+3][-c7+c8+1]=ex[-c7+c9+3][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8+1]-hz[-c7+c9+3][-c7+c8]);
ex[-c7+c9+3][-c7+c8+2]=ex[-c7+c9+3][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8+2]-hz[-c7+c9+3][-c7+c8+1]);
ex[-c7+c9+3][-c7+c8+3]=ex[-c7+c9+3][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8+3]-hz[-c7+c9+3][-c7+c8+2]);
hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-0.1*((double)(7))*(ey[-c7+c9][-c7+c8-1]+ex[-c7+c9-1][-c7+c8]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);
hz[-c7+c9-1][-c7+c8]=hz[-c7+c9-1][-c7+c8]-0.1*((double)(7))*(ey[-c7+c9][-c7+c8]+ex[-c7+c9-1][-c7+c8+1]-ex[-c7+c9-1][-c7+c8]-ey[-c7+c9-1][-c7+c8]);
hz[-c7+c9-1][-c7+c8+1]=hz[-c7+c9-1][-c7+c8+1]-0.1*((double)(7))*(ey[-c7+c9][-c7+c8+1]+ex[-c7+c9-1][-c7+c8+2]-ex[-c7+c9-1][-c7+c8+1]-ey[-c7+c9-1][-c7+c8+1]);
hz[-c7+c9-1][-c7+c8+2]=hz[-c7+c9-1][-c7+c8+2]-0.1*((double)(7))*(ey[-c7+c9][-c7+c8+2]+ex[-c7+c9-1][-c7+c8+3]-ex[-c7+c9-1][-c7+c8+2]-ey[-c7+c9-1][-c7+c8+2]);
hz[-c7+c9][-c7+c8-1]=hz[-c7+c9][-c7+c8-1]-0.1*((double)(7))*(ey[-c7+c9+1][-c7+c8-1]+ex[-c7+c9][-c7+c8]-ex[-c7+c9][-c7+c8-1]-ey[-c7+c9][-c7+c8-1]);
hz[-c7+c9][-c7+c8]=hz[-c7+c9][-c7+c8]-0.1*((double)(7))*(ey[-c7+c9+1][-c7+c8]+ex[-c7+c9][-c7+c8+1]-ex[-c7+c9][-c7+c8]-ey[-c7+c9][-c7+c8]);
hz[-c7+c9][-c7+c8+1]=hz[-c7+c9][-c7+c8+1]-0.1*((double)(7))*(ey[-c7+c9+1][-c7+c8+1]+ex[-c7+c9][-c7+c8+2]-ex[-c7+c9][-c7+c8+1]-ey[-c7+c9][-c7+c8+1]);
hz[-c7+c9][-c7+c8+2]=hz[-c7+c9][-c7+c8+2]-0.1*((double)(7))*(ey[-c7+c9+1][-c7+c8+2]+ex[-c7+c9][-c7+c8+3]-ex[-c7+c9][-c7+c8+2]-ey[-c7+c9][-c7+c8+2]);
hz[-c7+c9+1][-c7+c8-1]=hz[-c7+c9+1][-c7+c8-1]-0.1*((double)(7))*(ey[-c7+c9+2][-c7+c8-1]+ex[-c7+c9+1][-c7+c8]-ex[-c7+c9+1][-c7+c8-1]-ey[-c7+c9+1][-c7+c8-1]);
hz[-c7+c9+1][-c7+c8]=hz[-c7+c9+1][-c7+c8]-0.1*((double)(7))*(ey[-c7+c9+2][-c7+c8]+ex[-c7+c9+1][-c7+c8+1]-ex[-c7+c9+1][-c7+c8]-ey[-c7+c9+1][-c7+c8]);
hz[-c7+c9+1][-c7+c8+1]=hz[-c7+c9+1][-c7+c8+1]-0.1*((double)(7))*(ey[-c7+c9+2][-c7+c8+1]+ex[-c7+c9+1][-c7+c8+2]-ex[-c7+c9+1][-c7+c8+1]-ey[-c7+c9+1][-c7+c8+1]);
hz[-c7+c9+1][-c7+c8+2]=hz[-c7+c9+1][-c7+c8+2]-0.1*((double)(7))*(ey[-c7+c9+2][-c7+c8+2]+ex[-c7+c9+1][-c7+c8+3]-ex[-c7+c9+1][-c7+c8+2]-ey[-c7+c9+1][-c7+c8+2]);
hz[-c7+c9+2][-c7+c8-1]=hz[-c7+c9+2][-c7+c8-1]-0.1*((double)(7))*(ey[-c7+c9+3][-c7+c8-1]+ex[-c7+c9+2][-c7+c8]-ex[-c7+c9+2][-c7+c8-1]-ey[-c7+c9+2][-c7+c8-1]);
hz[-c7+c9+2][-c7+c8]=hz[-c7+c9+2][-c7+c8]-0.1*((double)(7))*(ey[-c7+c9+3][-c7+c8]+ex[-c7+c9+2][-c7+c8+1]-ex[-c7+c9+2][-c7+c8]-ey[-c7+c9+2][-c7+c8]);
hz[-c7+c9+2][-c7+c8+1]=hz[-c7+c9+2][-c7+c8+1]-0.1*((double)(7))*(ey[-c7+c9+3][-c7+c8+1]+ex[-c7+c9+2][-c7+c8+2]-ex[-c7+c9+2][-c7+c8+1]-ey[-c7+c9+2][-c7+c8+1]);
hz[-c7+c9+2][-c7+c8+2]=hz[-c7+c9+2][-c7+c8+2]-0.1*((double)(7))*(ey[-c7+c9+3][-c7+c8+2]+ex[-c7+c9+2][-c7+c8+3]-ex[-c7+c9+2][-c7+c8+2]-ey[-c7+c9+2][-c7+c8+2]);
}
for (; c9<=32*c6+31; c9=c9+1) {
ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);
ey[-c7+c9][-c7+c8+1]=ey[-c7+c9][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+1]-hz[-c7+c9-1][-c7+c8+1]);
ey[-c7+c9][-c7+c8+2]=ey[-c7+c9][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+2]-hz[-c7+c9-1][-c7+c8+2]);
ey[-c7+c9][-c7+c8+3]=ey[-c7+c9][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+3]-hz[-c7+c9-1][-c7+c8+3]);
ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);
ex[-c7+c9][-c7+c8+1]=ex[-c7+c9][-c7+c8+1]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+1]-hz[-c7+c9][-c7+c8]);
ex[-c7+c9][-c7+c8+2]=ex[-c7+c9][-c7+c8+2]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+2]-hz[-c7+c9][-c7+c8+1]);
ex[-c7+c9][-c7+c8+3]=ex[-c7+c9][-c7+c8+3]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8+3]-hz[-c7+c9][-c7+c8+2]);
hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-0.1*((double)(7))*(ey[-c7+c9][-c7+c8-1]+ex[-c7+c9-1][-c7+c8]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);
hz[-c7+c9-1][-c7+c8]=hz[-c7+c9-1][-c7+c8]-0.1*((double)(7))*(ey[-c7+c9][-c7+c8]+ex[-c7+c9-1][-c7+c8+1]-ex[-c7+c9-1][-c7+c8]-ey[-c7+c9-1][-c7+c8]);
hz[-c7+c9-1][-c7+c8+1]=hz[-c7+c9-1][-c7+c8+1]-0.1*((double)(7))*(ey[-c7+c9][-c7+c8+1]+ex[-c7+c9-1][-c7+c8+2]-ex[-c7+c9-1][-c7+c8+1]-ey[-c7+c9-1][-c7+c8+1]);
hz[-c7+c9-1][-c7+c8+2]=hz[-c7+c9-1][-c7+c8+2]-0.1*((double)(7))*(ey[-c7+c9][-c7+c8+2]+ex[-c7+c9-1][-c7+c8+3]-ex[-c7+c9-1][-c7+c8+2]-ey[-c7+c9-1][-c7+c8+2]);
}
}
for (; c8<=32*c5+31; c8=c8+1) {
{
for (c9=32*c6; c9<=32*c6+28; c9=c9+4) {
ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);
ey[-c7+c9+1][-c7+c8]=ey[-c7+c9+1][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8]-hz[-c7+c9][-c7+c8]);
ey[-c7+c9+2][-c7+c8]=ey[-c7+c9+2][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8]-hz[-c7+c9+1][-c7+c8]);
ey[-c7+c9+3][-c7+c8]=ey[-c7+c9+3][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8]-hz[-c7+c9+2][-c7+c8]);
ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);
ex[-c7+c9+1][-c7+c8]=ex[-c7+c9+1][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+1][-c7+c8]-hz[-c7+c9+1][-c7+c8-1]);
ex[-c7+c9+2][-c7+c8]=ex[-c7+c9+2][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+2][-c7+c8]-hz[-c7+c9+2][-c7+c8-1]);
ex[-c7+c9+3][-c7+c8]=ex[-c7+c9+3][-c7+c8]-0.5*((double)(1))*(hz[-c7+c9+3][-c7+c8]-hz[-c7+c9+3][-c7+c8-1]);
hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-0.1*((double)(7))*(ey[-c7+c9][-c7+c8-1]+ex[-c7+c9-1][-c7+c8]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);
hz[-c7+c9][-c7+c8-1]=hz[-c7+c9][-c7+c8-1]-0.1*((double)(7))*(ey[-c7+c9+1][-c7+c8-1]+ex[-c7+c9][-c7+c8]-ex[-c7+c9][-c7+c8-1]-ey[-c7+c9][-c7+c8-1]);
hz[-c7+c9+1][-c7+c8-1]=hz[-c7+c9+1][-c7+c8-1]-0.1*((double)(7))*(ey[-c7+c9+2][-c7+c8-1]+ex[-c7+c9+1][-c7+c8]-ex[-c7+c9+1][-c7+c8-1]-ey[-c7+c9+1][-c7+c8-1]);
hz[-c7+c9+2][-c7+c8-1]=hz[-c7+c9+2][-c7+c8-1]-0.1*((double)(7))*(ey[-c7+c9+3][-c7+c8-1]+ex[-c7+c9+2][-c7+c8]-ex[-c7+c9+2][-c7+c8-1]-ey[-c7+c9+2][-c7+c8-1]);
}
for (; c9<=32*c6+31; c9=c9+1) {
ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);
ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);
hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);
}
}
}
}
/*@ end @*/
}
for (c7=max(max(max(max(32*c5,32*c6-nx+32),0),32*c4),32*c5-ny+32);c7<=min(min(min(32*c6-1,tmax-1),32*c4+31),32*c5+30);c7++) {
for (c9=32*c6;c9<=32*c6+31;c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c8=c7+1;c8<=32*c5+31;c8++) {
for (c9=32*c6;c9<=32*c6+31;c9++) {
{ey[-c7+c9][-c7+c8]=ey[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9-1][-c7+c8]);} ;
{ex[-c7+c9][-c7+c8]=ex[-c7+c9][-c7+c8]-((double)(1))/2*(hz[-c7+c9][-c7+c8]-hz[-c7+c9][-c7+c8-1]);} ;
{hz[-c7+c9-1][-c7+c8-1]=hz[-c7+c9-1][-c7+c8-1]-((double)(7))/10*(ey[1+-c7+c9-1][-c7+c8-1]+ex[-c7+c9-1][1+-c7+c8-1]-ex[-c7+c9-1][-c7+c8-1]-ey[-c7+c9-1][-c7+c8-1]);} ;
}
}
}
if (ny == 1) {
for (c7=max(max(max(32*c6-nx+1,0),32*c5),32*c4);c7<=min(min(min(32*c6-1,tmax-1),32*c5+30),32*c4+31);c7++) {
for (c9=32*c6;c9<=min(c7+nx-1,32*c6+31);c9++) {
{ey[-c7+c9][0]=ey[-c7+c9][0]-((double)(1))/2*(hz[-c7+c9][0]-hz[-c7+c9-1][0]);} ;
}
for (c9=32*c6;c9<=min(c7+nx,32*c6+31);c9++) {
{hz[-c7+c9-1][0]=hz[-c7+c9-1][0]-((double)(7))/10*(ey[1+-c7+c9-1][0]+ex[-c7+c9-1][1+0]-ex[-c7+c9-1][0]-ey[-c7+c9-1][0]);} ;
}
}
}
if ((c1 == c2+c3) && (c4 == c6) && (c5 <= min(floord(tmax-32,32),floord(32*c6-1,32))) && (nx >= 2) && (ny == 1)) {
{ey[0][0]=32*c5+31;} ;
for (c9=32*c5+32;c9<=min(32*c5+nx+30,32*c6+31);c9++) {
{ey[-32*c5+c9-31][0]=ey[-32*c5+c9-31][0]-((double)(1))/2*(hz[-32*c5+c9-31][0]-hz[-32*c5+c9-31 -1][0]);} ;
}
}
if ((c1 == c2+c3) && (c4 == c6) && (c5 <= min(floord(tmax-32,32),floord(32*c6-1,32))) && (nx >= 2) && (ny >= 2)) {
{ey[0][0]=32*c5+31;} ;
for (c9=32*c5+32;c9<=min(32*c5+nx+30,32*c6+31);c9++) {
{ey[-32*c5+c9-31][0]=ey[-32*c5+c9-31][0]-((double)(1))/2*(hz[-32*c5+c9-31][0]-hz[-32*c5+c9-31 -1][0]);} ;
}
}
if ((c1 == c2+c3) && (-c4 == -c6) && (c4 >= ceild(32*c5-31,32)) && (c4 <= min(floord(tmax-32,32),floord(32*c5-1,32))) && (nx == 1) && (ny >= 2)) {
{ey[0][0]=32*c4+31;} ;
for (c8=32*c4+32;c8<=min(32*c4+ny+30,32*c5+31);c8++) {
{ey[0][-32*c4+c8-31]=32*c4+31;} ;
{ex[0][-32*c4+c8-31]=ex[0][-32*c4+c8-31]-((double)(1))/2*(hz[0][-32*c4+c8-31]-hz[0][-32*c4+c8-31 -1]);} ;
}
}
if ((c1 == c2+c3) && (-c4 == -c6) && (c4 <= min(floord(tmax-32,32),c5-1)) && (nx == 1)) {
for (c8=32*c5;c8<=min(32*c4+ny+30,32*c5+31);c8++) {
{ey[0][-32*c4+c8-31]=32*c4+31;} ;
{ex[0][-32*c4+c8-31]=ex[0][-32*c4+c8-31]-((double)(1))/2*(hz[0][-32*c4+c8-31]-hz[0][-32*c4+c8-31 -1]);} ;
}
}
if ((c1 == c2+c3) && (c4 == c6) && (c5 <= min(floord(tmax-32,32),c6)) && (nx == 1) && (ny == 1)) {
{ey[0][0]=32*c5+31;} ;
}
if ((c1 == c2+c3) && (-c4 == -c6) && (c4 <= min(floord(tmax-32,32),floord(32*c5-1,32))) && (nx == 1) && (ny == 1)) {
{ey[0][0]=32*c4+31;} ;
}
if ((c1 == c2+c3) && (c4 == c6) && (c5 <= min(floord(tmax-32,32),c6)) && (nx == 1) && (ny >= 2)) {
{ey[0][0]=32*c5+31;} ;
}
if ((c1 == c2+c3) && (-c4 == -c6) && (c4 >= ceild(32*c5-31,32)) && (c4 <= min(floord(tmax-32,32),floord(32*c5-1,32))) && (nx >= 2) && (ny >= 2)) {
{ey[0][0]=32*c4+31;} ;
for (c8=32*c4+32;c8<=min(32*c4+ny+30,32*c5+31);c8++) {
{ey[0][-32*c4+c8-31]=32*c4+31;} ;
{ex[0][-32*c4+c8-31]=ex[0][-32*c4+c8-31]-((double)(1))/2*(hz[0][-32*c4+c8-31]-hz[0][-32*c4+c8-31 -1]);} ;
}
}
if ((c1 == c2+c3) && (-c4 == -c6) && (c4 <= min(floord(tmax-32,32),c5-1)) && (nx >= 2)) {
for (c8=32*c5;c8<=min(32*c4+ny+30,32*c5+31);c8++) {
{ey[0][-32*c4+c8-31]=32*c4+31;} ;
{ex[0][-32*c4+c8-31]=ex[0][-32*c4+c8-31]-((double)(1))/2*(hz[0][-32*c4+c8-31]-hz[0][-32*c4+c8-31 -1]);} ;
}
}
if ((c1 == c2+c3) && (-c4 == -c6) && (c4 <= min(floord(tmax-32,32),c5)) && (nx >= 2) && (ny == 1)) {
{ey[0][0]=32*c4+31;} ;
}
if ((c1 == c2+c3) && (-c4 == -c5) && (-c4 == -c6) && (c4 <= floord(tmax-32,32)) && (nx >= 2) && (ny >= 2)) {
{ey[0][0]=32*c4+31;} ;
}
if ((c4 >= c5) && (c5 <= min(c6-1,floord(tmax-32,32))) && (ny == 1)) {
for (c9=32*c6;c9<=min(32*c5+nx+30,32*c6+31);c9++) {
{ey[-32*c5+c9-31][0]=ey[-32*c5+c9-31][0]-((double)(1))/2*(hz[-32*c5+c9-31][0]-hz[-32*c5+c9-31 -1][0]);} ;
}
}
if ((c4 >= c5) && (c5 <= min(c6-1,floord(tmax-32,32))) && (ny >= 2)) {
for (c9=32*c6;c9<=min(32*c5+nx+30,32*c6+31);c9++) {
{ey[-32*c5+c9-31][0]=ey[-32*c5+c9-31][0]-((double)(1))/2*(hz[-32*c5+c9-31][0]-hz[-32*c5+c9-31 -1][0]);} ;
}
}
}
}
}
}
}
}
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
}
/* Mean wall-clock seconds per repetition. */
annot_t_total = annot_t_total / REPS;
#ifndef TEST
printf("%f\n", annot_t_total);
#else
/* Validation build: dump the final hz field instead of the timing. */
{
int i,j;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
if (j%100==0)
printf("\n");
printf("%f ",hz[i][j]);
}
printf("\n");
}
}
#endif
/* NOTE(review): returning a value derived from hz presumably keeps the
   computation observable to the compiler — confirm before changing. */
return ((int) hz[0][0]);
}
|
cp-tree.h | /* Definitions for C++ parsing and type checking.
Copyright (C) 1987-2015 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_CP_TREE_H
#define GCC_CP_TREE_H
#include "ggc.h"
#include "hashtab.h"
#include "hash-set.h"
#include "vec.h"
#include "machmode.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "hash-map.h"
/* In order for the format checking to accept the C++ front end
diagnostic framework extensions, you must include this file before
diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE
in c-common.h. */
#undef GCC_DIAG_STYLE
#define GCC_DIAG_STYLE __gcc_cxxdiag__
#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H)
#error \
In order for the format checking to accept the C++ front end diagnostic \
framework extensions, you must include this file before diagnostic-core.h and \
c-common.h, not after.
#endif
#include "c-family/c-common.h"
#include "diagnostic.h"
#include "name-lookup.h"
/* Usage of TREE_LANG_FLAG_?:
0: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
CLEANUP_P (in TRY_BLOCK)
AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
PAREN_STRING_LITERAL (in STRING_CST)
DECL_GNU_TLS_P (in VAR_DECL)
KOENIG_LOOKUP_P (in CALL_EXPR)
STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
STMT_EXPR_NO_SCOPE (in STMT_EXPR)
BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD and OMP_DISTRIBUTE)
BASELINK_QUALIFIED_P (in BASELINK)
TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
TREE_INDIRECT_USING (in a TREE_LIST of using-directives)
ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag)
CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
DECL_OVERRIDE_P (in FUNCTION_DECL)
IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR)
OVL_ARG_DEPENDENT (in OVERLOAD)
PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION)
TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO)
SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR)
BLOCK_OUTER_CURLY_BRACE_P (in BLOCK)
1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
DELETE_EXPR_USE_VEC (in DELETE_EXPR).
(TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
ICS_ELLIPSIS_FLAG (in _CONV)
DECL_INITIALIZED_P (in VAR_DECL)
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
DECL_FINAL_P (in FUNCTION_DECL)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE)
CONSTRUCTOR_NO_IMPLICIT_ZERO (in CONSTRUCTOR)
TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO)
PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION)
2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE)
ICS_THIS_FLAG (in _CONV)
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
FNDECL_USED_AUTO (in FUNCTION_DECL)
DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE)
REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF)
AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR)
CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR)
3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV)
FN_TRY_BLOCK_P (in TRY_BLOCK)
IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE)
BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL)
CALL_EXPR_LIST_INIT_P (in CALL_EXPR, AGGR_INIT_EXPR)
4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
or FIELD_DECL).
IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE)
DECL_TINFO_P (in VAR_DECL)
FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE)
DECL_VTABLE_OR_VTT_P (in VAR_DECL)
FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE)
DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL)
TYPE_MARKED_P (in _TYPE)
RANGE_FOR_IVDEP (in RANGE_FOR_STMT)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
1: TYPE_HAS_USER_CONSTRUCTOR.
2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE)
TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE)
3: TYPE_FOR_JAVA.
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE)
ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE)
AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM)
REFERENCE_VLA_OK (in REFERENCE_TYPE)
6: TYPE_DEPENDENT_P_VALID
Usage of DECL_LANG_FLAG_?:
0: DECL_ERROR_REPORTED (in VAR_DECL).
DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL)
DECL_MUTABLE_P (in FIELD_DECL)
DECL_DEPENDENT_P (in USING_DECL)
LABEL_DECL_BREAK (in LABEL_DECL)
1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
USING_DECL_TYPENAME_P (in USING_DECL)
DECL_VLA_CAPTURE_P (in FIELD_DECL)
DECL_ARRAY_PARAMETER_P (in PARM_DECL)
LABEL_DECL_CONTINUE (in LABEL_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
DECL_SELF_REFERENCE_P (in a TYPE_DECL)
DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
5: DECL_INTERFACE_KNOWN.
6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL).
DECL_FIELD_IS_BASE (in FIELD_DECL)
TYPE_DECL_ALIAS_P (in TYPE_DECL)
7: DECL_DEAD_FOR_LOCAL (in VAR_DECL).
DECL_THUNK_P (in a member FUNCTION_DECL)
DECL_NORMAL_CAPTURE_P (in FIELD_DECL)
8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL)
Usage of language-independent fields in a language-dependent manner:
TYPE_ALIAS_SET
This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
forth as a substitute for the mark bits provided in `lang_type'.
At present, only the six low-order bits are used.
TYPE_LANG_SLOT_1
For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO.
For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS
BINFO_VIRTUALS
For a binfo, this is a TREE_LIST. There is an entry for each
virtual function declared either in BINFO or its direct and
indirect primary bases.
The BV_DELTA of each node gives the amount by which to adjust the
`this' pointer when calling the function. If the method is an
overridden version of a base class method, then it is assumed
that, prior to adjustment, the this pointer points to an object
of the base class.
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
index of the vcall offset for this entry.
The BV_FN is the declaration for the virtual function itself.
If BV_LOST_PRIMARY is set, it means that this entry is for a lost
primary virtual base and can be left null in the vtable.
BINFO_VTABLE
This is an expression with POINTER_TYPE that gives the value
to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
to extract the VAR_DECL for the complete vtable.
DECL_VINDEX
This field is NULL for a non-virtual function. For a virtual
function, it is eventually set to an INTEGER_CST indicating the
index in the vtable at which this function can be found. When
a virtual function is declared, but before it is known what
function is overridden, this field is the error_mark_node.
Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
the virtual function this one overrides, and whose TREE_CHAIN is
the old DECL_VINDEX. */
/* Language-specific tree checkers. */
#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == FUNCTION_DECL)
#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ __typeof (NODE) const __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
|| !__t->decl_common.lang_specific->u.fn.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#else
#define THUNK_FUNCTION_CHECK(NODE) (NODE)
#endif
/* Language-dependent contents of an identifier. */
struct GTY(()) lang_identifier {
/* Shared C-family identifier data; a node is viewed as this struct
   through LANG_IDENTIFIER_CAST. */
struct c_common_identifier c_common;
/* Namespace-scope bindings; read via IDENTIFIER_NAMESPACE_BINDINGS. */
cxx_binding *namespace_bindings;
/* Innermost binding chain for this name; read via IDENTIFIER_BINDING. */
cxx_binding *bindings;
/* Read via IDENTIFIER_TEMPLATE. */
tree class_template_info;
/* Read/written via IDENTIFIER_LABEL_VALUE / SET_IDENTIFIER_LABEL_VALUE. */
tree label_value;
};
/* View T as the C++ front end's lang_identifier when it is an
   IDENTIFIER_NODE; yield NULL for every other tree code. */
inline lang_identifier*
identifier_p (tree t)
{
return TREE_CODE (t) == IDENTIFIER_NODE ? (lang_identifier*) t : NULL;
}
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID)
#define LANG_IDENTIFIER_CAST(NODE) \
((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
/* Payload of a TEMPLATE_PARM_INDEX node (union member `tpi' of
   lang_tree_node).  NOTE(review): the index/level field meanings below
   are inferred from the names -- confirm against pt.c before relying
   on them. */
struct GTY(()) template_parm_index_s {
struct tree_common common;
/* Presumably the parameter's position in its parameter list. */
int index;
/* Presumably the nesting level of the owning template. */
int level;
/* Presumably the level before any level reduction. */
int orig_level;
/* The DECL for this template parameter. */
tree decl;
};
typedef struct template_parm_index_s template_parm_index;
/* Payload of a pointer-to-member constant (union member `ptrmem' of
   lang_tree_node). */
struct GTY(()) ptrmem_cst {
struct tree_common common;
/* NOTE(review): presumably the DECL of the member designated by the
   constant -- confirm at the PTRMEM_CST accessors. */
tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;
#define IDENTIFIER_GLOBAL_VALUE(NODE) \
namespace_binding ((NODE), global_namespace)
#define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), global_namespace, (VAL))
#define IDENTIFIER_NAMESPACE_VALUE(NODE) \
namespace_binding ((NODE), current_namespace)
#define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), current_namespace, (VAL))
#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
#define BIND_EXPR_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
(DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
|| LAMBDA_FUNCTION_P (NODE))
#define STATEMENT_LIST_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
/* Mark the outer curly brace BLOCK. */
#define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE))
/* Nonzero if this statement should be considered a full-expression,
i.e., if temporaries created during this statement should have
their destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
sense of `same'. */
#define same_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
/* Returns nonzero iff NODE is a declaration for the global function
`main'. */
#define DECL_MAIN_P(NODE) \
(DECL_EXTERN_C_FUNCTION_P (NODE) \
&& DECL_NAME (NODE) != NULL_TREE \
&& MAIN_NAME_P (DECL_NAME (NODE)) \
&& flag_hosted)
/* The overloaded FUNCTION_DECL. */
#define OVL_FUNCTION(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) TREE_CHAIN (NODE)
/* Polymorphic access to FUNCTION and CHAIN. */
#define OVL_CURRENT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE))
#define OVL_NEXT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE)
/* If set, this was imported in a using declaration.
This is not to confuse with being used somewhere, which
is not important for this node. */
#define OVL_USED(NODE) TREE_USED (OVERLOAD_CHECK (NODE))
/* If set, this OVERLOAD was created for argument-dependent lookup
and can be freed afterward. */
#define OVL_ARG_DEPENDENT(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE))
/* One link of an OVERLOAD chain.  FUNCTION is read via OVL_FUNCTION;
   the next link is reached through TREE_CHAIN (OVL_CHAIN/OVL_NEXT). */
struct GTY(()) tree_overload {
struct tree_common common;
tree function;
};
/* Payload of a TEMPLATE_DECL (union member `template_decl' of
   lang_tree_node).  NOTE(review): ARGUMENTS/RESULT semantics are not
   shown here -- presumably the parameter info and the templated decl;
   confirm against the DECL_TEMPLATE_* accessors. */
struct GTY(()) tree_template_decl {
struct tree_decl_common common;
tree arguments;
tree result;
};
/* Returns true iff NODE is a BASELINK. */
#define BASELINK_P(NODE) \
(TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
BASELINK_FUNCTIONS. */
#define BASELINK_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
#define BASELINK_FUNCTIONS(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* The BINFO in which the search for the functions indicated by this baselink
began. This base is used to determine the accessibility of functions
selected by overload resolution. */
#define BASELINK_ACCESS_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
to which the conversion should occur. This value is important if
the BASELINK_FUNCTIONS include a template conversion operator --
the BASELINK_OPTYPE can be used to determine what type the user
requested. */
#define BASELINK_OPTYPE(NODE) \
(TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Nonzero if this baselink was from a qualified lookup. */
#define BASELINK_QUALIFIED_P(NODE) \
TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
/* Payload of a BASELINK node; fields are read through the
   BASELINK_* accessors defined above. */
struct GTY(()) tree_baselink {
struct tree_common common;
/* The BINFO in which lookup found FUNCTIONS (BASELINK_BINFO). */
tree binfo;
/* FUNCTION_DECL, TEMPLATE_DECL, OVERLOAD, or TEMPLATE_ID_EXPR
   (BASELINK_FUNCTIONS). */
tree functions;
/* The BINFO where the search began, used for access checking of the
   function selected by overload resolution (BASELINK_ACCESS_BINFO). */
tree access_binfo;
};
/* The different kinds of ids that we encounter.
   NOTE(review): presumably produced when the parser classifies an
   id-expression -- confirm against the parser's uses. */
typedef enum cp_id_kind
{
/* Not an id at all. */
CP_ID_KIND_NONE,
/* An unqualified-id that is not a template-id. */
CP_ID_KIND_UNQUALIFIED,
/* An unqualified-id that is a dependent name. */
CP_ID_KIND_UNQUALIFIED_DEPENDENT,
/* An unqualified template-id. */
CP_ID_KIND_TEMPLATE_ID,
/* A qualified-id. */
CP_ID_KIND_QUALIFIED
} cp_id_kind;
/* The various kinds of C++0x warnings we encounter.
   NOTE(review): each value presumably selects the feature name used in
   a C++11-availability diagnostic -- confirm at the emitters. */
typedef enum cpp0x_warn_str
{
/* extended initializer lists */
CPP0X_INITIALIZER_LISTS,
/* explicit conversion operators */
CPP0X_EXPLICIT_CONVERSION,
/* variadic templates */
CPP0X_VARIADIC_TEMPLATES,
/* lambda expressions */
CPP0X_LAMBDA_EXPR,
/* C++0x auto */
CPP0X_AUTO,
/* scoped enums */
CPP0X_SCOPED_ENUMS,
/* defaulted and deleted functions */
CPP0X_DEFAULTED_DELETED,
/* inline namespaces */
CPP0X_INLINE_NAMESPACES,
/* override controls, override/final */
CPP0X_OVERRIDE_CONTROLS,
/* non-static data member initializers */
CPP0X_NSDMI,
/* user defined literals */
CPP0X_USER_DEFINED_LITERALS,
/* delegating constructors */
CPP0X_DELEGATING_CTORS,
/* inheriting constructors */
CPP0X_INHERITING_CTORS,
/* C++11 attributes */
CPP0X_ATTRIBUTES,
/* ref-qualified member functions */
CPP0X_REF_QUALIFIER
} cpp0x_warn_str;
/* The various kinds of operation used by composite_pointer_type.
   NOTE(review): presumably selects the diagnostic wording for the
   operand context -- confirm in typeck.c. */
typedef enum composite_pointer_operation
{
/* comparison */
CPO_COMPARISON,
/* conversion */
CPO_CONVERSION,
/* conditional expression */
CPO_CONDITIONAL_EXPR
} composite_pointer_operation;
/* Possible cases of expression list used by build_x_compound_expr_from_list.
   Each value names the syntactic context of the list.  */
typedef enum expr_list_kind {
ELK_INIT, /* initializer */
ELK_MEM_INIT, /* member initializer */
ELK_FUNC_CAST /* functional cast */
} expr_list_kind;
/* Possible cases of implicit bad rhs conversions.  Each value names
   the context in which the ill-formed conversion appears.  */
typedef enum impl_conv_rhs {
ICR_DEFAULT_ARGUMENT, /* default argument */
ICR_CONVERTING, /* converting */
ICR_INIT, /* initialization */
ICR_ARGPASS, /* argument passing */
ICR_RETURN, /* return */
ICR_ASSIGN /* assignment */
} impl_conv_rhs;
/* Possible cases of implicit or explicit bad conversions to void.
   Each value names the syntactic position being converted.  */
typedef enum impl_conv_void {
ICV_CAST, /* (explicit) conversion to void */
ICV_SECOND_OF_COND, /* second operand of conditional expression */
ICV_THIRD_OF_COND, /* third operand of conditional expression */
ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
ICV_LEFT_OF_COMMA, /* left operand of comma operator */
ICV_STATEMENT, /* statement */
ICV_THIRD_IN_FOR /* for increment expression */
} impl_conv_void;
/* Possible invalid uses of an abstract class that might not have a
   specific associated declaration.  Each value names the construct in
   which the abstract class was used.  */
typedef enum abstract_class_use {
ACU_UNKNOWN, /* unknown or decl provided */
ACU_CAST, /* cast to abstract class */
ACU_NEW, /* new-expression of abstract class */
ACU_THROW, /* throw-expression of abstract class */
ACU_CATCH, /* catch-parameter of abstract class */
ACU_ARRAY, /* array of abstract class */
ACU_RETURN, /* return type of abstract class */
ACU_PARM /* parameter type of abstract class */
} abstract_class_use;
/* Macros for access to language-specific slots in an identifier. */
#define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->namespace_bindings)
#define IDENTIFIER_TEMPLATE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->class_template_info)
/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
identifier. It's PREVIOUS is the next outermost binding. Each
VALUE field is a DECL for the associated declaration. Thus,
name lookup consists simply of pulling off the node at the front
of the list (modulo oddities for looking up the names of types,
and such.) You can use SCOPE field to determine the scope
that bound the name. */
#define IDENTIFIER_BINDING(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->bindings)
/* TREE_TYPE only indicates on local and class scope the current
type. For namespace scope, the presence of a type in any namespace
is indicated with global_type_node, and the real type behind must
be found through lookup. */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)
#define IDENTIFIER_LABEL_VALUE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->label_value)
#define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \
IDENTIFIER_LABEL_VALUE (NODE) = (VALUE)
/* Nonzero if this identifier is used as a virtual function name somewhere
(optimizes searches). */
#define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* Nonzero if this identifier is the prefix for a mangled C++ operator
name. */
#define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE)
/* Nonzero if this identifier is the name of a type-conversion
operator. */
#define IDENTIFIER_TYPENAME_P(NODE) \
TREE_LANG_FLAG_4 (NODE)
/* Nonzero if this identifier is the name of a constructor or
destructor. */
#define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \
TREE_LANG_FLAG_3 (NODE)
/* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague
linkage which the prelinker has assigned to this translation
unit. */
#define IDENTIFIER_REPO_CHOSEN(NAME) \
(TREE_LANG_FLAG_6 (NAME))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
/* The tokens stored in the default argument. */
#define DEFARG_TOKENS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens)
#define DEFARG_INSTANTIATIONS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations)
/* Payload of a DEFAULT_ARG node (union member `default_arg' of
   lang_tree_node); fields are read via DEFARG_TOKENS and
   DEFARG_INSTANTIATIONS. */
struct GTY (()) tree_default_arg {
struct tree_common common;
/* The tokens stored in the default argument (DEFARG_TOKENS). */
struct cp_token_cache *tokens;
/* Read via DEFARG_INSTANTIATIONS. */
vec<tree, va_gc> *instantiations;
};
#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
#define DEFERRED_NOEXCEPT_ARGS(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
((NODE) && (TREE_PURPOSE (NODE)) \
&& (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT))
#define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \
(DEFERRED_NOEXCEPT_SPEC_P (NODE) \
&& DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE)
/* Payload of a DEFERRED_NOEXCEPT node; fields are read via
   DEFERRED_NOEXCEPT_PATTERN and DEFERRED_NOEXCEPT_ARGS.  A NULL
   PATTERN marks an unevaluated spec (UNEVALUATED_NOEXCEPT_SPEC_P). */
struct GTY (()) tree_deferred_noexcept {
struct tree_base base;
tree pattern;
tree args;
};
/* The condition associated with the static assertion. This must be
an integral constant expression. */
#define STATIC_ASSERT_CONDITION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)
/* The message associated with the static assertion. This must be a
string constant, which will be emitted as an error message when the
static assert condition is false. */
#define STATIC_ASSERT_MESSAGE(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)
/* Source location information for a static assertion. */
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
/* Payload of a STATIC_ASSERT node; fields are read via the
   STATIC_ASSERT_* accessors above. */
struct GTY (()) tree_static_assert {
struct tree_common common;
/* The asserted condition: an integral constant expression. */
tree condition;
/* String constant emitted as the error when CONDITION is false. */
tree message;
/* Source location of the static assertion. */
location_t location;
};
/* Payload of an ARGUMENT_PACK_SELECT node.
   NOTE(review): presumably selects element INDEX of ARGUMENT_PACK
   during pack expansion -- confirm in pt.c. */
struct GTY (()) tree_argument_pack_select {
struct tree_common common;
tree argument_pack;
int index;
};
/* The different kinds of traits that we encounter.  The value is
   stored in a TRAIT_EXPR via TRAIT_EXPR_KIND.
   NOTE(review): enumerators presumably map one-to-one onto the
   compiler trait intrinsics (__is_class, __underlying_type, ...) --
   confirm against the parser's trait handling. */
typedef enum cp_trait_kind
{
CPTK_BASES,
CPTK_DIRECT_BASES,
CPTK_HAS_NOTHROW_ASSIGN,
CPTK_HAS_NOTHROW_CONSTRUCTOR,
CPTK_HAS_NOTHROW_COPY,
CPTK_HAS_TRIVIAL_ASSIGN,
CPTK_HAS_TRIVIAL_CONSTRUCTOR,
CPTK_HAS_TRIVIAL_COPY,
CPTK_HAS_TRIVIAL_DESTRUCTOR,
CPTK_HAS_VIRTUAL_DESTRUCTOR,
CPTK_IS_ABSTRACT,
CPTK_IS_BASE_OF,
CPTK_IS_CLASS,
CPTK_IS_EMPTY,
CPTK_IS_ENUM,
CPTK_IS_FINAL,
CPTK_IS_LITERAL_TYPE,
CPTK_IS_POD,
CPTK_IS_POLYMORPHIC,
CPTK_IS_STD_LAYOUT,
CPTK_IS_TRIVIAL,
CPTK_IS_TRIVIALLY_ASSIGNABLE,
CPTK_IS_TRIVIALLY_CONSTRUCTIBLE,
CPTK_IS_TRIVIALLY_COPYABLE,
CPTK_IS_UNION,
CPTK_UNDERLYING_TYPE
} cp_trait_kind;
/* The types that we are processing. */
#define TRAIT_EXPR_TYPE1(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)
#define TRAIT_EXPR_TYPE2(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)
/* The specific trait that we are processing. */
#define TRAIT_EXPR_KIND(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)
/* Payload of a TRAIT_EXPR node; fields are read via the
   TRAIT_EXPR_TYPE1/TYPE2/KIND accessors above. */
struct GTY (()) tree_trait_expr {
struct tree_common common;
/* First type operand of the trait. */
tree type1;
/* Second type operand, for binary traits. */
tree type2;
/* Which trait this expression evaluates. */
enum cp_trait_kind kind;
};
/* Based off of TYPE_ANONYMOUS_P. */
#define LAMBDA_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE))
/* Test if FUNCTION_DECL is a lambda function. */
#define LAMBDA_FUNCTION_P(FNDECL) \
(DECL_OVERLOADED_OPERATOR_P (FNDECL) == CALL_EXPR \
&& LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))
/* The default capture mode of a lambda-introducer; stored in a
   LAMBDA_EXPR via LAMBDA_EXPR_DEFAULT_CAPTURE_MODE. */
enum cp_lambda_default_capture_mode_type {
/* No default capture. */
CPLD_NONE,
/* Default capture by copy. */
CPLD_COPY,
/* Default capture by reference. */
CPLD_REFERENCE
};
/* The method of default capture, if any. */
#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)
/* The capture-list, including `this'. Each capture is stored as a FIELD_DECL
* so that the name, type, and field are all together, whether or not it has
* been added to the lambda's class type.
TREE_LIST:
TREE_PURPOSE: The FIELD_DECL for this capture.
TREE_VALUE: The initializer. This is part of a GNU extension. */
#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)
/* During parsing of the lambda-introducer, the node in the capture-list
that holds the 'this' capture. During parsing of the body, the
capture proxy for that node. */
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
/* Predicate tracking whether `this' is in the effective capture set. */
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
LAMBDA_EXPR_THIS_CAPTURE(NODE)
/* Predicate tracking whether the lambda was declared 'mutable'. */
#define LAMBDA_EXPR_MUTABLE_P(NODE) \
TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
/* The return type in the expression.
* NULL_TREE indicates that none was specified. */
#define LAMBDA_EXPR_RETURN_TYPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->return_type)
/* The source location of the lambda. */
#define LAMBDA_EXPR_LOCATION(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)
/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */
#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)
/* If EXTRA_SCOPE, this is the number of the lambda within that scope. */
#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator)
/* During parsing of the lambda, a vector of capture proxies which need
to be pushed once we're done processing a nested lambda. */
#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)
/* The closure type of the lambda. Note that the TREE_TYPE of a
LAMBDA_EXPR is always NULL_TREE, because we need to instantiate the
LAMBDA_EXPR in order to instantiate the type. */
#define LAMBDA_EXPR_CLOSURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->closure)
/* Payload of a LAMBDA_EXPR node; each field is documented at its
   LAMBDA_EXPR_* accessor above. */
struct GTY (()) tree_lambda_expr
{
struct tree_typed typed;
/* TREE_LIST of captures, including `this' (LAMBDA_EXPR_CAPTURE_LIST). */
tree capture_list;
/* The `this' capture node / its proxy (LAMBDA_EXPR_THIS_CAPTURE). */
tree this_capture;
/* Specified return type, or NULL_TREE (LAMBDA_EXPR_RETURN_TYPE). */
tree return_type;
/* Mangling scope, or NULL_TREE for no linkage (LAMBDA_EXPR_EXTRA_SCOPE). */
tree extra_scope;
/* The closure class type (LAMBDA_EXPR_CLOSURE). */
tree closure;
/* Capture proxies pending after a nested lambda
   (LAMBDA_EXPR_PENDING_PROXIES). */
vec<tree, va_gc> *pending_proxies;
/* Source location of the lambda (LAMBDA_EXPR_LOCATION). */
location_t locus;
/* Default capture mode (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE). */
enum cp_lambda_default_capture_mode_type default_capture_mode;
/* Ordinal within EXTRA_SCOPE (LAMBDA_EXPR_DISCRIMINATOR). */
int discriminator;
};
/* A (typedef,context,usage location) triplet.
It represents a typedef used through a
context at a given source location.
e.g.
struct foo {
typedef int myint;
};
struct bar {
foo::myint v; // #1<-- this location.
};
In bar, the triplet will be (myint, foo, #1).
*/
struct GTY(()) qualified_typedef_usage_s {
/* The typedef being used. */
tree typedef_decl;
/* The context through which it is used. */
tree context;
/* Source location of the use. */
location_t locus;
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;
/* Non-zero if this template specialization has access violations that
should be rechecked when the function is instantiated outside argument
deduction. */
#define TINFO_HAS_ACCESS_ERRORS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
(TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))
/* Non-zero if this variable template specialization was specified using a
template-id, so it's a partial or full specialization and not a definition
of the member template of a particular class specialization. */
#define TINFO_USED_TEMPLATE_ID(NODE) \
(TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)))
/* Payload of a TEMPLATE_INFO node (union member `template_info' of
   lang_tree_node). */
struct GTY(()) tree_template_info {
struct tree_common common;
/* Typedef uses whose access must be rechecked when the entity is
   instantiated outside argument deduction; see the
   qualified_typedef_usage_s comment above. */
vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};
/* Discriminator for the alternatives of the lang_tree_node union
   below; each TS_CP_* value matches a GTY tag string there. */
enum cp_tree_node_structure_enum {
TS_CP_GENERIC,
TS_CP_IDENTIFIER,
TS_CP_TPI,
TS_CP_PTRMEM,
TS_CP_BINDING,
TS_CP_OVERLOAD,
TS_CP_BASELINK,
TS_CP_TEMPLATE_DECL,
TS_CP_WRAPPER,
TS_CP_DEFAULT_ARG,
TS_CP_DEFERRED_NOEXCEPT,
TS_CP_STATIC_ASSERT,
TS_CP_ARGUMENT_PACK_SELECT,
TS_CP_TRAIT_EXPR,
TS_CP_LAMBDA_EXPR,
TS_CP_TEMPLATE_INFO,
TS_CP_USERDEF_LITERAL,
LAST_TS_CP_ENUM
};
/* The resulting tree type. */
/* GC-visible union of every C++ tree node layout.  The GTY `desc'
   string makes the garbage collector discriminate members by calling
   cp_tree_node_structure; the `tag' strings name the matching
   cp_tree_node_structure_enum values. */
union GTY((desc ("cp_tree_node_structure (&%h)"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
union tree_node GTY ((tag ("TS_CP_GENERIC"),
desc ("tree_node_structure (&%h)"))) generic;
struct template_parm_index_s GTY ((tag ("TS_CP_TPI"))) tpi;
struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl;
struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
static_assertion;
struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
argument_pack_select;
struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
trait_expression;
struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
lambda_expression;
struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
template_info;
struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
userdef_literal;
};
/* Indices into the cp_global_trees array of front-end-global tree
   nodes; see the accessor macros following the array's declaration
   below. */
enum cp_tree_index
{
CPTI_JAVA_BYTE_TYPE,
CPTI_JAVA_SHORT_TYPE,
CPTI_JAVA_INT_TYPE,
CPTI_JAVA_LONG_TYPE,
CPTI_JAVA_FLOAT_TYPE,
CPTI_JAVA_DOUBLE_TYPE,
CPTI_JAVA_CHAR_TYPE,
CPTI_JAVA_BOOLEAN_TYPE,
CPTI_WCHAR_DECL,
CPTI_VTABLE_ENTRY_TYPE,
CPTI_DELTA_TYPE,
CPTI_VTABLE_INDEX_TYPE,
CPTI_CLEANUP_TYPE,
CPTI_VTT_PARM_TYPE,
CPTI_CLASS_TYPE,
CPTI_UNKNOWN_TYPE,
CPTI_INIT_LIST_TYPE,
CPTI_VTBL_TYPE,
CPTI_VTBL_PTR_TYPE,
CPTI_STD,
CPTI_ABI,
CPTI_CONST_TYPE_INFO_TYPE,
CPTI_TYPE_INFO_PTR_TYPE,
CPTI_ABORT_FNDECL,
CPTI_AGGR_TAG,
CPTI_CTOR_IDENTIFIER,
CPTI_COMPLETE_CTOR_IDENTIFIER,
CPTI_BASE_CTOR_IDENTIFIER,
CPTI_DTOR_IDENTIFIER,
CPTI_COMPLETE_DTOR_IDENTIFIER,
CPTI_BASE_DTOR_IDENTIFIER,
CPTI_DELETING_DTOR_IDENTIFIER,
CPTI_DELTA_IDENTIFIER,
CPTI_IN_CHARGE_IDENTIFIER,
CPTI_VTT_PARM_IDENTIFIER,
CPTI_NELTS_IDENTIFIER,
CPTI_THIS_IDENTIFIER,
CPTI_PFN_IDENTIFIER,
CPTI_VPTR_IDENTIFIER,
CPTI_STD_IDENTIFIER,
CPTI_LANG_NAME_C,
CPTI_LANG_NAME_CPLUSPLUS,
CPTI_LANG_NAME_JAVA,
CPTI_EMPTY_EXCEPT_SPEC,
CPTI_NOEXCEPT_TRUE_SPEC,
CPTI_NOEXCEPT_FALSE_SPEC,
CPTI_JCLASS,
CPTI_TERMINATE,
CPTI_CALL_UNEXPECTED,
CPTI_ATEXIT_FN_PTR_TYPE,
CPTI_ATEXIT,
CPTI_DSO_HANDLE,
CPTI_DCAST,
CPTI_KEYED_CLASSES,
CPTI_NULLPTR,
CPTI_NULLPTR_TYPE,
/* Must remain last: used to size cp_global_trees. */
CPTI_MAX
};
/* The C++ front end's commonly-used global tree nodes, kept in one
   GC-managed array and indexed by the CPTI_* enumerators above.  */
extern GTY(()) tree cp_global_trees[CPTI_MAX];
/* Primitive types used for 'extern "Java"' interoperability.  */
#define java_byte_type_node cp_global_trees[CPTI_JAVA_BYTE_TYPE]
#define java_short_type_node cp_global_trees[CPTI_JAVA_SHORT_TYPE]
#define java_int_type_node cp_global_trees[CPTI_JAVA_INT_TYPE]
#define java_long_type_node cp_global_trees[CPTI_JAVA_LONG_TYPE]
#define java_float_type_node cp_global_trees[CPTI_JAVA_FLOAT_TYPE]
#define java_double_type_node cp_global_trees[CPTI_JAVA_DOUBLE_TYPE]
#define java_char_type_node cp_global_trees[CPTI_JAVA_CHAR_TYPE]
#define java_boolean_type_node cp_global_trees[CPTI_JAVA_BOOLEAN_TYPE]
/* The declaration node for `wchar_t'.  */
#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
/* The type of an entry in the virtual function table.  */
#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
   pointer in pointer-to-member types. */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable. */
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
/* A type node used for an entity whose type is not yet known.  */
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
/* A type node used to represent a braced-init-list.  */
#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
/* The type of a vtable, and of a pointer to it.  */
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
/* The `std' and ABI namespaces.  */
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
/* Types used by the RTTI machinery.  */
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
/* The FUNCTION_DECL for `abort'.  */
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
/* The `nullptr' constant and its type.  */
#define nullptr_node cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]
/* We cache these tree nodes so as to call get_identifier less
   frequently.  */
/* The name of a constructor that takes an in-charge parameter to
   decide whether or not to construct virtual base classes.  */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes.  */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes.  */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
   decide whether or not to destroy virtual base classes and whether
   or not to delete the object.  */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes.  */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
   classes.  */
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
   then deletes the entire object.  */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
/* The names of the `__delta' and `__in_chrg' implicit parameters.  */
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
   for this subobject constructor or destructor.  */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER]
/* The name of the implicit `this' parameter.  */
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
/* Names of the pointer-to-function and vptr fields used by the
   pointer-to-member-function representation.  */
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the std namespace.  */
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
/* Identifiers for the supported language linkages.  */
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
#define lang_name_java cp_global_trees[CPTI_LANG_NAME_JAVA]
/* Exception specifier used for throw().  */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
/* Cached exception specifiers for noexcept(true) and noexcept(false).  */
#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
/* If non-NULL, a POINTER_TYPE equivalent to (java::lang::Class*).  */
#define jclass_node cp_global_trees[CPTI_JCLASS]
/* The declaration for `std::terminate'.  */
#define terminate_node cp_global_trees[CPTI_TERMINATE]
/* The declaration for "__cxa_call_unexpected".  */
#define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED]
/* The type of the function-pointer argument to "__cxa_atexit" (or
   "std::atexit", if "__cxa_atexit" is not being used).  */
#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
/* A pointer to `std::atexit'.  */
#define atexit_node cp_global_trees[CPTI_ATEXIT]
/* A pointer to `__dso_handle'.  */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
/* The declaration of the dynamic_cast runtime.  */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
/* The type of a destructor.  */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
/* The type of the vtt parameter passed to subobject constructors and
   destructors.  */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
/* A TREE_LIST of the dynamic classes whose vtables may have to be
   emitted in this translation unit.  */
#define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES]
/* Node to indicate default access. This must be distinct from the
   access nodes in tree.h.  */
#define access_default_node null_node
/* Global state.  Most of the front end's notion of "where we are"
   (current namespace, class, template parameters, ...) lives here and
   is reached through the scope_chain pointer declared below.  */
struct GTY(()) saved_scope {
  vec<cxx_saved_binding, va_gc> *old_bindings;
  /* The current open namespace (see current_namespace).  */
  tree old_namespace;
  /* Stack of namespaces of current declarations (see
     decl_namespace_list).  */
  vec<tree, va_gc> *decl_ns_list;
  /* Name and type of the class being defined, if any (see
     current_class_name / current_class_type).  */
  tree class_name;
  tree class_type;
  /* Most recent access specifier while parsing a class (see
     current_access_specifier).  */
  tree access_specifier;
  tree function_decl;
  /* Language-linkage name stack and its top (see current_lang_base /
     current_lang_name).  */
  vec<tree, va_gc> *lang_base;
  tree lang_name;
  /* Active template parameters (see current_template_parms).  */
  tree template_parms;
  /* Cached binding level of the most recently exited class (see
     previous_class_level).  */
  cp_binding_level *x_previous_class_level;
  tree x_saved_tree;
  /* Only used for uses of this in trailing return type.  */
  tree x_current_class_ptr;
  tree x_current_class_ref;
  int x_processing_template_decl;
  int x_processing_specialization;
  BOOL_BITFIELD x_processing_explicit_instantiation : 1;
  BOOL_BITFIELD need_pop_function_context : 1;
  int unevaluated_operand;
  int inhibit_evaluation_warnings;
  /* Nonzero while parsing the operand of noexcept (see
     cp_noexcept_operand).  */
  int noexcept_operand;
  /* If non-zero, implicit "omp declare target" attribute is added into the
     attribute lists.  */
  int omp_declare_target_attribute;
  struct stmt_tree_s x_stmt_tree;
  cp_binding_level *class_bindings;
  cp_binding_level *bindings;
  /* Map from template-local decls to their instantiations (see
     local_specializations).  GTY((skip)): not walked by the GC.  */
  hash_map<tree, tree> *GTY((skip)) x_local_specializations;
  /* Link to the previously saved scope, forming a stack.  */
  struct saved_scope *prev;
};
/* The innermost saved scope; top of the saved_scope stack.  */
extern GTY(()) struct saved_scope *scope_chain;
/* The current open namespace.  */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations.  */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class.  */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class.  */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
   given by the user, or, if no access specifier was given, the
   default value appropriate for the kind of class (i.e., struct,
   class, or union).  */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack.  */
#define current_lang_base scope_chain->lang_base
/* The current language-linkage name (e.g. lang_name_cplusplus).  */
#define current_lang_name scope_chain->lang_name
/* When parsing a template declaration, a TREE_LIST represents the
   active template parameters.  Each node in the list represents one
   level of template parameters.  The innermost level is first in the
   list.  The depth of each level is stored as an INTEGER_CST in the
   TREE_PURPOSE of each node.  The parameters for that level are
   stored in the TREE_VALUE.  */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
/* RAII sentinel to handle clearing processing_template_decl and restoring
   it when done.  On construction the current value is remembered and,
   unless RESET is false, processing_template_decl is cleared; the
   destructor restores the remembered value.  */
struct processing_template_decl_sentinel
{
  /* The value of processing_template_decl at construction time.  */
  int saved;

  processing_template_decl_sentinel (bool reset = true)
    : saved (processing_template_decl)
  {
    if (reset)
      processing_template_decl = 0;
  }

  ~processing_template_decl_sentinel ()
  {
    processing_template_decl = saved;
  }
};
/* RAII sentinel to disable certain warnings during template substitution
   and elsewhere.  It captures the current value of an int warning flag,
   zeroes the flag (unless SUPPRESS is false), and restores the captured
   value when the sentinel goes out of scope.  */
struct warning_sentinel
{
  /* Reference to the warning flag being managed.  */
  int &flag;
  /* The flag's value at construction time, restored on destruction.  */
  int val;

  warning_sentinel (int &flag, bool suppress = true)
    : flag (flag), val (flag)
  {
    if (suppress)
      flag = 0;
  }

  ~warning_sentinel ()
  {
    flag = val;
  }
};
/* The cached class binding level, from the most recently exited
   class, or NULL if none.  */
#define previous_class_level scope_chain->x_previous_class_level
/* A map from local variable declarations in the body of the template
   presently being instantiated to the corresponding instantiated
   local variables.  */
#define local_specializations scope_chain->x_local_specializations
/* Nonzero if we are parsing the operand of a noexcept operator.  */
#define cp_noexcept_operand scope_chain->noexcept_operand
/* A list of private types mentioned, for deferred access checking.  */
/* NOTE(review): the comment above appears to belong to a declaration
   not visible in this chunk; cxx_int_tree_map itself is a (uid, tree)
   pair used as a hash-table entry — presumably keyed by DECL_UID;
   confirm against the hasher's definition.  */
struct GTY((for_user)) cxx_int_tree_map {
  unsigned int uid;
  tree to;
};
/* Hash traits (ggc_hasher interface) for cxx_int_tree_map entries;
   the hash and equality functions are defined elsewhere.  */
struct cxx_int_tree_map_hasher : ggc_hasher<cxx_int_tree_map *>
{
  static hashval_t hash (cxx_int_tree_map *);
  static bool equal (cxx_int_tree_map *, cxx_int_tree_map *);
};
struct named_label_entry;
/* Hash traits (ggc_hasher interface) for named-label entries; the
   hash and equality functions are defined elsewhere.  */
struct named_label_hasher : ggc_hasher<named_label_entry *>
{
  static hashval_t hash (named_label_entry *);
  static bool equal (named_label_entry *, named_label_entry *);
};
/* Global state pertinent to the current function.  Reached through
   cfun->language; see the cp_function_chain accessor macros below.  */
struct GTY(()) language_function {
  struct c_language_function base;
  /* See cdtor_label.  */
  tree x_cdtor_label;
  /* `this' and `*this' for the current member function (see
     current_class_ptr / current_class_ref).  */
  tree x_current_class_ptr;
  tree x_current_class_ref;
  /* See current_eh_spec_block.  */
  tree x_eh_spec_block;
  /* See current_in_charge_parm and current_vtt_parm.  */
  tree x_in_charge_parm;
  tree x_vtt_parm;
  /* See current_function_return_value.  */
  tree x_return_value;
  /* See current_function_auto_return_pattern.  */
  tree x_auto_return_pattern;
  /* See the current_function_returns_* macros below.  */
  BOOL_BITFIELD returns_value : 1;
  BOOL_BITFIELD returns_null : 1;
  BOOL_BITFIELD returns_abnormally : 1;
  BOOL_BITFIELD infinite_loop: 1;
  BOOL_BITFIELD x_in_function_try_handler : 1;
  BOOL_BITFIELD x_in_base_initializer : 1;
  /* True if this function can throw an exception.  */
  BOOL_BITFIELD can_throw : 1;
  BOOL_BITFIELD invalid_constexpr : 1;
  hash_table<named_label_hasher> *x_named_labels;
  cp_binding_level *bindings;
  vec<tree, va_gc> *x_local_names;
  /* Tracking possibly infinite loops.  This is a vec<tree> only because
     vec<bool> doesn't work with gtype.  */
  vec<tree, va_gc> *infinite_loops;
  hash_table<cxx_int_tree_map_hasher> *extern_decl_map;
};
/* The current C++-specific per-function global variables.  */
#define cp_function_chain (cfun->language)
/* In a constructor destructor, the point at which all derived class
   destroying/construction has been done.  I.e., just before a
   constructor returns, or before any base class destroying will be done
   in a destructor.  */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
   PARM_DECL for the `this' pointer.  The current_class_ref is an
   expression for `*this'.  Outside a function body the saved-scope
   copies are used instead (see saved_scope).  */
#define current_class_ptr \
  (*(cfun && cp_function_chain \
     ? &cp_function_chain->x_current_class_ptr \
     : &scope_chain->x_current_class_ptr))
#define current_class_ref \
  (*(cfun && cp_function_chain \
     ? &cp_function_chain->x_current_class_ref \
     : &scope_chain->x_current_class_ref))
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
   function, if any.  */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function.  Only used for
   constructors and destructors.  */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function.  Only used for
   constructors and destructors.  */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */
#define current_function_returns_abnormally \
  cp_function_chain->returns_abnormally
/* Set to 0 at beginning of a function definition, set to 1 if we see an
   obvious infinite loop.  This can have false positives and false
   negatives, so it should only be used as a heuristic.  */
#define current_function_infinite_loop cp_function_chain->infinite_loop
/* Nonzero if we are processing a base initializer.  Zero elsewhere.  */
#define in_base_initializer cp_function_chain->x_in_base_initializer
#define in_function_try_handler cp_function_chain->x_in_function_try_handler
/* Expression always returned from function, or error_mark_node
   otherwise, for use by the automatic named return value optimization.  */
#define current_function_return_value \
  (cp_function_chain->x_return_value)
/* A type involving 'auto' to be used for return type deduction.  */
#define current_function_auto_return_pattern \
  (cp_function_chain->x_auto_return_pattern)
/* True if NAME is the IDENTIFIER_NODE for an overloaded "operator
   new" or "operator delete".  */
#define NEW_DELETE_OPNAME_P(NAME) \
  ((NAME) == ansi_opname (NEW_EXPR) \
   || (NAME) == ansi_opname (VEC_NEW_EXPR) \
   || (NAME) == ansi_opname (DELETE_EXPR) \
   || (NAME) == ansi_opname (VEC_DELETE_EXPR))
/* The cached identifiers for the plain and assignment forms of the
   operator with tree code CODE.  */
#define ansi_opname(CODE) \
  (operator_name_info[(int) (CODE)].identifier)
#define ansi_assopname(CODE) \
  (assignment_operator_name_info[(int) (CODE)].identifier)
/* TRUE if a tree code represents a statement.  */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
/* The language linkages understood by the front end.  */
enum languages { lang_c, lang_cplusplus, lang_java };
/* Macros to make error reporting functions' lives easier.  */
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
  (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes.  */
#define TYPE_ANONYMOUS_P(NODE) \
  (OVERLOAD_TYPE_P (NODE) && ANON_AGGRNAME_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE.  */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a type that could resolve to any kind of concrete type
   at instantiation time.  */
#define WILDCARD_TYPE_P(T) \
  (TREE_CODE (T) == TEMPLATE_TYPE_PARM \
   || TREE_CODE (T) == TYPENAME_TYPE \
   || TREE_CODE (T) == TYPEOF_TYPE \
   || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
   || TREE_CODE (T) == DECLTYPE_TYPE)
/* Nonzero if T is a class (or struct or union) type.  Also nonzero
   for template type parameters, typename types, and instantiated
   template template parameters.  Keep these checks in ascending code
   order.  */
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
/* Set CLASS_TYPE_P for T to VAL.  T must be a class, struct, or
   union type.  */
#define SET_CLASS_TYPE_P(T, VAL) \
  (TYPE_LANG_FLAG_5 (T) = (VAL))
/* Nonzero if T is a class type.  Zero for template type parameters,
   typename types, and so forth.  */
#define CLASS_TYPE_P(T) \
  (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Nonzero if T is a class type but not a union.  */
#define NON_UNION_CLASS_TYPE_P(T) \
  (CLASS_TYPE_P (T) && TREE_CODE (T) != UNION_TYPE)
/* Keep these checks in ascending code order.  */
#define RECORD_OR_UNION_CODE_P(T) \
  ((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define OVERLOAD_TYPE_P(T) \
  (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
/* True if this a "Java" type, defined in 'extern "Java"'.  */
#define TYPE_FOR_JAVA(NODE) TYPE_LANG_FLAG_3 (NODE)
/* True if this type is dependent.  This predicate is only valid if
   TYPE_DEPENDENT_P_VALID is true.  */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
   result that TYPE_DEPENDENT_P is valid.  */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)
/* Nonzero if this type is const-qualified.  */
#define CP_TYPE_CONST_P(NODE) \
  ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified.  */
#define CP_TYPE_VOLATILE_P(NODE) \
  ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified.  */
#define CP_TYPE_RESTRICT_P(NODE) \
  ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
   volatile-qualified.  Other qualifiers are ignored.  This macro is
   used to test whether or not it is OK to bind an rvalue to a
   reference.  */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
  ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
   == TYPE_QUAL_CONST)
/* The argument-type list of a FUNCTION_DECL, past its first entry.  */
#define FUNCTION_ARG_CHAIN(NODE) \
  TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
   which refers to a user-written parameter.  */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
  skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS.  */
#define FUNCTION_FIRST_USER_PARM(NODE) \
  skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
/* Nonzero iff TYPE is derived from PARENT.  Ignores accessibility and
   ambiguity issues.  */
#define DERIVED_FROM_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)
/* Gives the visibility specification for a class type.  */
#define CLASSTYPE_VISIBILITY(TYPE) \
  DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
  DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))
/* A (purpose, value) pair of trees, used e.g. for the vcall_indices
   vector in lang_type_class.  */
typedef struct GTY (()) tree_pair_s {
  tree purpose;
  tree value;
} tree_pair_s;
typedef tree_pair_s *tree_pair_p;
/* These are a few header flags for 'struct lang_type'.  Actually,
   all but the first are used only for lang_type_class; they
   are put in this structure to save space.  */
struct GTY(()) lang_type_header {
  /* Discriminator for the union in lang_type: nonzero when the union
     holds a lang_type_class, zero for lang_type_ptrmem (see the
     GTY desc on lang_type below).  */
  BOOL_BITFIELD is_lang_type_class : 1;
  /* See TYPE_HAS_CONVERSION.  */
  BOOL_BITFIELD has_type_conversion : 1;
  /* See TYPE_HAS_COPY_CTOR.  */
  BOOL_BITFIELD has_copy_ctor : 1;
  /* See TYPE_HAS_DEFAULT_CONSTRUCTOR.  */
  BOOL_BITFIELD has_default_ctor : 1;
  /* See CLASSTYPE_READONLY_FIELDS_NEED_INIT.  */
  BOOL_BITFIELD const_needs_init : 1;
  /* See CLASSTYPE_REF_FIELDS_NEED_INIT.  */
  BOOL_BITFIELD ref_needs_init : 1;
  /* See TYPE_HAS_CONST_COPY_ASSIGN.  */
  BOOL_BITFIELD has_const_copy_assign : 1;
  BOOL_BITFIELD spare : 1;
};
/* This structure provides additional information above and beyond
   what is provided in the ordinary tree_type.  In the past, we used it
   for the types of class types, template parameters types, typename
   types, and so forth.  However, there can be many (tens to hundreds
   of thousands) of template parameter types in a compilation, and
   there's no need for this additional information in that case.
   Therefore, we now use this data structure only for class types.

   In the past, it was thought that there would be relatively few
   class types.  However, in the presence of heavy use of templates,
   many (i.e., thousands) of classes can easily be generated.
   Therefore, we should endeavor to keep the size of this structure to
   a minimum.  */
struct GTY(()) lang_type_class {
  struct lang_type_header h;
  unsigned char align;
  unsigned has_mutable : 1;
  unsigned com_interface : 1;
  unsigned non_pod_class : 1;
  unsigned nearly_empty_p : 1;
  unsigned user_align : 1;
  unsigned has_copy_assign : 1;
  unsigned has_new : 1;
  unsigned has_array_new : 1;
  /* See TYPE_GETS_DELETE: two bits recording which forms of operator
     delete are defined.  */
  unsigned gets_delete : 2;
  unsigned interface_only : 1;
  unsigned interface_unknown : 1;
  unsigned contains_empty_class_p : 1;
  unsigned anon_aggr : 1;
  unsigned non_zero_init : 1;
  unsigned empty_p : 1;
  unsigned vec_new_uses_cookie : 1;
  unsigned declared_class : 1;
  unsigned diamond_shaped : 1;
  unsigned repeated_base : 1;
  unsigned being_defined : 1;
  unsigned java_interface : 1;
  unsigned debug_requested : 1;
  unsigned fields_readonly : 1;
  unsigned use_template : 2;
  unsigned ptrmemfunc_flag : 1;
  unsigned was_anonymous : 1;
  /* The lazy_* flags record special member functions that are implied
     but not yet declared; see the CLASSTYPE_LAZY_* accessors.  */
  unsigned lazy_default_ctor : 1;
  unsigned lazy_copy_ctor : 1;
  unsigned lazy_copy_assign : 1;
  unsigned lazy_destructor : 1;
  unsigned has_const_copy_ctor : 1;
  unsigned has_complex_copy_ctor : 1;
  unsigned has_complex_copy_assign : 1;
  unsigned non_aggregate : 1;
  unsigned has_complex_dflt : 1;
  unsigned has_list_ctor : 1;
  unsigned non_std_layout : 1;
  unsigned is_literal : 1;
  unsigned lazy_move_ctor : 1;
  unsigned lazy_move_assign : 1;
  unsigned has_complex_move_ctor : 1;
  unsigned has_complex_move_assign : 1;
  unsigned has_constexpr_ctor : 1;
  /* When adding a flag here, consider whether or not it ought to
     apply to a template instance if it applies to the template.  If
     so, make sure to copy it in instantiate_class_template!  */
  /* There are some bits left to fill out a 32-bit word.  Keep track
     of this by updating the size of this bitfield whenever you add or
     remove a flag.  */
  unsigned dummy : 3;
  /* Binfo of the primary base, if any (see CLASSTYPE_PRIMARY_BINFO).  */
  tree primary_base;
  vec<tree_pair_s, va_gc> *vcall_indices;
  tree vtables;
  tree typeinfo_var;
  /* Virtual bases, in initialization order (see CLASSTYPE_VBASECLASSES).  */
  vec<tree, va_gc> *vbases;
  /* Nested user-defined types (see CLASSTYPE_NESTED_UTDS).  */
  binding_table nested_udts;
  /* The as-base variant of this type (see CLASSTYPE_AS_BASE).  */
  tree as_base;
  /* Pure virtual functions (see CLASSTYPE_PURE_VIRTUALS).  */
  vec<tree, va_gc> *pure_virtuals;
  tree friend_classes;
  /* Member functions (see CLASSTYPE_METHOD_VEC for the layout).  */
  vec<tree, va_gc> * GTY((reorder ("resort_type_method_vec"))) methods;
  /* See CLASSTYPE_KEY_METHOD.  */
  tree key_method;
  /* See CLASSTYPE_DECL_LIST.  */
  tree decl_list;
  tree template_info;
  /* See CLASSTYPE_BEFRIENDING_CLASSES.  */
  tree befriending_classes;
  /* In a RECORD_TYPE, information specific to Objective-C++, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface.  See objc/objc-act.h for details.  */
  tree objc_info;
  /* sorted_fields is sorted based on a pointer, so we need to be able
     to resort it if pointers get rearranged.  */
  struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields")))
    sorted_fields;
  /* FIXME reuse another field?  */
  tree lambda_expr;
};
/* The lang_type variant used for pointer-to-member-function types;
   RECORD holds the backing record type.  */
struct GTY(()) lang_type_ptrmem {
  struct lang_type_header h;
  tree record;
};
/* The type-specific language data hung off TYPE_LANG_SPECIFIC.  The
   union is discriminated by h.is_lang_type_class (see the GTY desc):
   tag "1" selects the class variant, tag "0" the ptrmem variant.  */
struct GTY(()) lang_type {
  union lang_type_u
  {
    struct lang_type_header GTY((skip (""))) h;
    struct lang_type_class GTY((tag ("1"))) c;
    struct lang_type_ptrmem GTY((tag ("0"))) ptrmem;
  } GTY((desc ("%h.h.is_lang_type_class"))) u;
};
/* Access the class or pointer-to-member half of TYPE_LANG_SPECIFIC.
   With tree checking enabled, verify the discriminator bit first and
   call lang_check_failed on a mismatch; the GNU statement expression
   yields the address of the selected union member.  (The original text
   here read "<->u.c;" / "<->u.ptrmem;" — an HTML-entity mangling of
   "&lt->u.c;" — which is not valid C; restored to match the
   non-checking variants below.)  */
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define LANG_TYPE_CLASS_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
   if (! lt->u.h.is_lang_type_class) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.c; })
#define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
   if (lt->u.h.is_lang_type_class) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.ptrmem; })
#else
#define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c)
#define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem)
#endif /* ENABLE_TREE_CHECKING */
/* Nonzero for _CLASSTYPE means that operator delete is defined. */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
storage to indicate how many array elements are in use. */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
(CLASS_TYPE_P (NODE) \
&& LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
itself to other types. */
#define TYPE_HAS_CONVERSION(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has a move constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)
/* Nonzero means that NODE (a class type) has a destructor -- but that
it has not yet been declared. */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that NODE (a class type) is final */
#define CLASSTYPE_FINAL(NODE) \
TYPE_FINAL_P (NODE)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)
/* True iff the class type NODE has an "operator =" whose parameter
has a parameter of type "const X&". */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_copy_assign)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)
/* Nonzero if this class has an X(initializer_list<T>) constructor. */
#define TYPE_HAS_LIST_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)
/* Nonzero if this class has a constexpr constructor other than a copy/move
constructor. Note that a class can have constexpr constructors for
static initialization even if it isn't a literal class. */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
/* Nonzero if this class defines an overloaded operator new. (An
operator new [] doesn't count.) */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[]. */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined. I.e., the left brace
starting the definition of this type has been seen. */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Nonzero means that this type is either complete or being defined, so we
can do lookup in it. */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
(COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
/* Mark bits for repeated base checks. */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
base object. */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
type. */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
the first noninline non-pure-virtual member function. NULL_TREE
if there is no key function or if this is a class template */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector member functions defined in this class. Each element is
either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD. All
functions with the same name end up in the same slot. The first
two elements are for constructors, and destructors, respectively.
All template conversion operators to innermost template dependent
types are overloaded on the next slot, if they exist. Note, the
names for these functions will not all be the same. The
non-template conversion operators & templated conversions to
non-innermost template types are next, followed by ordinary member
functions. There may be empty entries at the end of the vector.
The conversion operators are unsorted. The ordinary member
functions are sorted, once the class is complete. */
#define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods)
/* For class templates, this is a TREE_LIST of all member data,
functions, types, and friends in the order of declaration.
The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
and the RECORD_TYPE for the class template otherwise. */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* The slot in the CLASSTYPE_METHOD_VEC where constructors go. */
#define CLASSTYPE_CONSTRUCTOR_SLOT 0
/* The slot in the CLASSTYPE_METHOD_VEC where destructors go. */
#define CLASSTYPE_DESTRUCTOR_SLOT 1
/* The first slot in the CLASSTYPE_METHOD_VEC where conversion
operators can appear. */
#define CLASSTYPE_FIRST_CONVERSION_SLOT 2
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
((*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_CONSTRUCTOR_SLOT])
/* A FUNCTION_DECL for the destructor for NODE. These are the
destructors that take an in-charge parameter. If
CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTORS(NODE) \
(CLASSTYPE_METHOD_VEC (NODE) \
? (*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_DESTRUCTOR_SLOT] \
: NULL_TREE)
/* A dictionary of the nested user-defined-types (class-types, or enums)
found within this class. This table includes nested member class
templates. */
#define CLASSTYPE_NESTED_UTDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
which it shares the virtual function table pointer. */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
(CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
the base class which contains the virtual function table pointer
for this class. */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
that this type uses in a post-order depth-first left-to-right
order. (In other words, these bases appear in the order that they
should be initialized.) */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
i.e., NODE without virtual base classes. */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
#define IS_FAKE_BASE_TYPE(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
&& CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
base classes, for when we use this type as a base itself. */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
/* Size and (user-specified) alignment of NODE when used as a base
class, taken from its CLASSTYPE_AS_BASE variant. */
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes. */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
(CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* True if this is a Java interface type, declared with
'__attribute__ ((java_interface))'. */
#define TYPE_JAVA_INTERFACE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->java_interface)
/* A vec<tree> of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type is an abstract class type,
i.e. CLASSTYPE_PURE_VIRTUALS is non-null. */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE))
/* Nonzero means that this type has an X() constructor. */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor)
/* Nonzero means that this type contains a mutable member. */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is not POD for the purpose of layout
(as defined in the ABI). This is different from the language's POD. */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class type is a non-standard-layout class. */
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
/* Nonzero means that this class contains pod types whose default
initialization is not a zero initialization (namely, pointers to
data members). */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
#define CLASSTYPE_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
virtual function table pointer. */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject. */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend. The
TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
case of a template friend. */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class. */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* The associated LAMBDA_EXPR that made this class. */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type. */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
(LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))
/* Say whether this node was declared as a "class" or a "struct". */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
which have no specified initialization. */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE))
/* Nonzero if this class has ref members
which have no specified initialization. */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
`#pragma interface', and it is not included in its implementation file. */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
typeinfo, and other similar per-class data should be emitted in
this translation unit. This flag does not indicate whether or not
these items should be emitted; it only indicates that we know one
way or the other. Note the flag is stored inverted, as
"interface_unknown". */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this class. */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information. */
/* Nonzero means that this class is on a path leading to a new vtable. */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
have this flag set. */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality. For a binfo,
this is functionally equivalent to using same_type_p, but
measurably faster. At least one of the arguments must be a
BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
BINFO_TYPE(T) ever stops being the main variant of the class the
binfo is for, this macro must change. */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
be a non-virtual primary base (since it would then use the vtable from a
derived class and never become non-primary.) */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
(BINFO_NEW_VTABLE_MARKED (B) = 1, \
gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
be searched. */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
is a nearly-empty virtual base that has been taken by some other
base in the complete hierarchy.) */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class. */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE)
/* A vec<tree_pair_s> of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
base. */
#define CLASSTYPE_VCALL_INDICES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE. The primary vtable will be
first, followed by the construction vtables and VTT, if any. */
#define CLASSTYPE_VTABLES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
such variable has been created. This field is only set for the
TYPE_MAIN_VARIANT of the class. */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list. */
/* The number of bytes by which to adjust the `this' pointer when
calling this virtual function. Subtract this value from the this
pointer. Always non-NULL, might be constant zero though. */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
when calling this virtual function. Add the value at that vtable
index to the this pointer. */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call. */
#define BV_FN(NODE) (TREE_VALUE (NODE))
/* Whether or not this entry is for a lost primary virtual base. */
#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
will be NULL_TREE to indicate a throw specification of `()', or
no exceptions allowed. For a noexcept specification, TREE_VALUE
is NULL_TREE and TREE_PURPOSE is the constant-expression. For
a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
(for templates) or an OVERLOAD list of functions (for implicitly
declared functions). */
#define TYPE_RAISES_EXCEPTIONS(NODE) \
TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
or noexcept(true). */
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the
case for things declared noexcept(true) and, with -fnothrow-opt, for
throw() functions. */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)
/* The binding level associated with the namespace. */
#define NAMESPACE_LEVEL(NODE) \
(LANG_DECL_NS_CHECK (NODE)->level)
/* Flags shared by all forms of DECL_LANG_SPECIFIC.
Some of the flags live here only to make lang_decl_min/fn smaller. Do
not make this struct larger than 32 bits; instead, make sel smaller. */
struct GTY(()) lang_decl_base {
/* Discriminates the lang_decl union below: 0 = min, 1 = fn,
2 = ns, 3 = parm (see the GTY tags on struct lang_decl). */
unsigned selector : 16; /* Larger than necessary for faster access. */
ENUM_BITFIELD(languages) language : 4;
unsigned use_template : 2;
unsigned not_really_extern : 1; /* var or fn */
unsigned initialized_in_class : 1; /* var or fn */
unsigned repo_available_p : 1; /* var or fn */
unsigned threadprivate_or_deleted_p : 1; /* var or fn */
unsigned anticipated_p : 1; /* fn, type or template */
unsigned friend_attr : 1; /* fn, type or template */
unsigned template_conv_p : 1; /* var or template */
unsigned odr_used : 1; /* var or fn */
/* Discriminates lang_decl_min's u2 union: 0 = access,
1 = discriminator. */
unsigned u2sel : 1;
/* 1 spare bit */
};
/* True for DECL codes which have template info and access, i.e. whose
DECL_LANG_SPECIFIC provides at least the lang_decl_min fields. */
#define LANG_DECL_HAS_MIN(NODE) \
(VAR_OR_FUNCTION_DECL_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL \
|| TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == USING_DECL)
/* DECL_LANG_SPECIFIC for the above codes. */
struct GTY(()) lang_decl_min {
struct lang_decl_base base;
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_ALIAS.
In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
DECL_TEMPLATE_INFO. */
tree template_info;
/* Which member is active is recorded in base.u2sel (see the GTY
desc below). */
union lang_decl_u2 {
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_VIRTUAL_OFFSET.
Otherwise this is DECL_ACCESS. */
tree GTY ((tag ("0"))) access;
/* For VAR_DECL in function, this is DECL_DISCRIMINATOR. */
int GTY ((tag ("1"))) discriminator;
} GTY ((desc ("%0.u.base.u2sel"))) u2;
};
/* Additional DECL_LANG_SPECIFIC information for functions. */
struct GTY(()) lang_decl_fn {
struct lang_decl_min min;
/* In an overloaded operator, this is the value of
DECL_OVERLOADED_OPERATOR_P; ERROR_MARK if the function is not an
overloaded operator. */
ENUM_BITFIELD (tree_code) operator_code : 16;
unsigned global_ctor_p : 1;
unsigned global_dtor_p : 1;
unsigned assignment_operator_p : 1;
unsigned static_function : 1;
unsigned pure_virtual : 1;
unsigned defaulted_p : 1;
unsigned has_in_charge_parm_p : 1;
unsigned has_vtt_parm_p : 1;
unsigned pending_inline_p : 1;
unsigned nonconverting : 1;
unsigned thunk_p : 1;
unsigned this_thunk_p : 1;
unsigned hidden_friend_p : 1;
unsigned omp_declare_reduction_p : 1;
/* 2 spare bits on 32-bit hosts, 34 on 64-bit hosts. */
/* For a non-thunk function decl, this is a tree list of
friendly classes. For a thunk function decl, it is the
thunked to function decl. */
tree befriending_classes;
/* For a non-virtual FUNCTION_DECL, this is
DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which
DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
this pointer and result pointer adjusting thunks are
chained here. This pointer thunks to return pointer thunks
will be chained on the return pointer thunk. */
tree context;
/* Discriminated by the thunk_p bit (see the GTY desc). */
union lang_decl_u5
{
/* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
DECL_CLONED_FUNCTION. */
tree GTY ((tag ("0"))) cloned_function;
/* In a FUNCTION_DECL for which THUNK_P holds this is the
THUNK_FIXED_OFFSET. */
HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
} GTY ((desc ("%1.thunk_p"))) u5;
/* Discriminated by the pending_inline_p bit (see the GTY desc). */
union lang_decl_u3
{
struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
struct language_function * GTY ((tag ("0")))
saved_language_function;
} GTY ((desc ("%1.pending_inline_p"))) u;
};
/* DECL_LANG_SPECIFIC for namespaces. */
struct GTY(()) lang_decl_ns {
struct lang_decl_base base;
/* The binding level of the namespace (NAMESPACE_LEVEL). */
cp_binding_level *level;
/* Using-directive list (DECL_NAMESPACE_USING). */
tree ns_using;
/* Users of this namespace (DECL_NAMESPACE_USERS). */
tree ns_users;
};
/* DECL_LANG_SPECIFIC for parameters. */
struct GTY(()) lang_decl_parm {
struct lang_decl_base base;
/* Function-declarator nesting level of the parameter
(DECL_PARM_LEVEL). */
int level;
/* 1-based index among user-declared parameters; 0 for artificial
parameters (DECL_PARM_INDEX). */
int index;
};
/* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a
union rather than a struct containing a union as its only field, but
tree.h declares it as a struct. The active member is chosen by
base.selector (see the GTY desc and tags). */
struct GTY(()) lang_decl {
union GTY((desc ("%h.base.selector"))) lang_decl_u {
struct lang_decl_base GTY ((default)) base;
struct lang_decl_min GTY((tag ("0"))) min;
struct lang_decl_fn GTY ((tag ("1"))) fn;
struct lang_decl_ns GTY((tag ("2"))) ns;
struct lang_decl_parm GTY((tag ("3"))) parm;
} u;
};
/* Looks through a template (if present) to find what it declares;
yields NODE itself when NODE is not a TEMPLATE_DECL. */
#define STRIP_TEMPLATE(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
/* Checked accessors for the DECL_LANG_SPECIFIC union: each evaluates
   the node's lang_decl once into LT, asserts that the requested union
   member is the active one (via LANG_DECL_HAS_MIN or base.selector),
   and yields the address of that member.  These rely on the GNU
   statement-expression extension; the non-checking fallbacks are in
   the #else branch below.  */
#define LANG_DECL_MIN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
   if (!LANG_DECL_HAS_MIN (NODE)) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.min; })
/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
   template, not just on a FUNCTION_DECL.  So when looking for things in
   lang_decl_fn, look down through a TEMPLATE_DECL into its result.  */
#define LANG_DECL_FN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \
   if (!DECL_DECLARES_FUNCTION_P (NODE) || lt->u.base.selector != 1) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.fn; })
#define LANG_DECL_NS_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
   if (TREE_CODE (NODE) != NAMESPACE_DECL || lt->u.base.selector != 2) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.ns; })
#define LANG_DECL_PARM_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
   if (TREE_CODE (NODE) != PARM_DECL) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.parm; })
#define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
   if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.min.u2; })
#else
/* Non-checking variants: plain member accesses with no validation.  */
#define LANG_DECL_MIN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.min)
#define LANG_DECL_FN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)
#define LANG_DECL_NS_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.ns)
#define LANG_DECL_PARM_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.parm)
#define LANG_DECL_U2_CHECK(NODE, TF) \
  (&DECL_LANG_SPECIFIC (NODE)->u.min.u2)
#endif /* ENABLE_TREE_CHECKING */
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
declaration. Some entities (like a member function in a local
class, or a local variable) do not have linkage at all, and this
macro should not be used in those cases.
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
created by language-independent code, and has C linkage. Most
VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
#define DECL_LANGUAGE(NODE) \
(DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.language \
: (TREE_CODE (NODE) == FUNCTION_DECL \
? lang_c : lang_cplusplus))
/* Set the language linkage for NODE to LANGUAGE. */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))
/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
is a constructor. */
#define DECL_CONSTRUCTOR_P(NODE) \
DECL_CXX_CONSTRUCTOR_P (STRIP_TEMPLATE (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
object. */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
object. */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
specialized in-charge constructor or the specialized not-in-charge
constructor. */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
is a destructor. */
#define DECL_DESTRUCTOR_P(NODE) \
DECL_CXX_DESTRUCTOR_P (STRIP_TEMPLATE (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
specialized in-charge (complete-object) destructor, the deleting
destructor, or the base destructor, i.e. not a clone. */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_DESTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object. */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
object. */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object that deletes the object after it has been destroyed. */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == deleting_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
destructor. */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))
/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
cloned. */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))
/* Perform an action for each clone of FN, if FN is a function with
clones. This macro should be used like:
FOR_EACH_CLONE (clone, fn)
{ ... }
*/
#define FOR_EACH_CLONE(CLONE, FN) \
if (TREE_CODE (FN) == FUNCTION_DECL \
&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \
|| DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))) \
for (CLONE = DECL_CHAIN (FN); \
CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
CLONE = DECL_CHAIN (CLONE))
/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
#define DECL_DISCRIMINATOR_P(NODE) \
(VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE))
/* Discriminator for name mangling. */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)
/* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */
#define DECL_DISCRIMINATOR_SET_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1)
/* The index of a user-declared parameter in its function, starting at 1.
All artificial parameters will have index 0. */
#define DECL_PARM_INDEX(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->index)
/* The level of a user-declared parameter in its function, starting at 1.
A parameter of the function will have level 1; a parameter of the first
nested function declarator (i.e. t in void f (void (*p)(T t))) will have
level 2. */
#define DECL_PARM_LEVEL(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->level)
/* Nonzero if the VTT parm has been added to NODE. */
#define DECL_HAS_VTT_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
required, i.e. a base constructor/destructor of a class with
virtual bases. */
#define DECL_NEEDS_VTT_PARM_P(NODE) \
(CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \
&& (DECL_BASE_CONSTRUCTOR_P (NODE) \
|| DECL_BASE_DESTRUCTOR_P (NODE)))
/* Nonzero if NODE is a user-defined conversion operator. */
#define DECL_CONV_FN_P(NODE) \
(DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE)))
/* If FN is a conversion operator, the type to which it converts.
Otherwise, NULL_TREE. */
#define DECL_CONV_FN_TYPE(FN) \
(DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE)
/* Nonzero if NODE, which is a TEMPLATE_DECL, is a template
conversion operator to a type dependent on the innermost template
args. */
#define DECL_TEMPLATE_CONV_FN_P(NODE) \
(DECL_LANG_SPECIFIC (TEMPLATE_DECL_CHECK (NODE))->u.base.template_conv_p)
/* Nonzero if NODE, a static data member, was declared in its class as an
array of unknown bound. Reuses the template_conv_p bit, which is
only otherwise used for templates. */
#define VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.template_conv_p \
: false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.template_conv_p = true)
/* Set the overloaded operator code for NODE to CODE. */
#define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \
(LANG_DECL_FN_CHECK (NODE)->operator_code = (CODE))
/* If NODE is an overloaded operator, then this returns the TREE_CODE
associated with the overloaded operator.
DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine
whether or not NODE is an assignment operator. If NODE is not an
overloaded operator, ERROR_MARK is returned. Since the numerical
value of ERROR_MARK is zero, this macro can be used as a predicate
to test whether or not NODE is an overloaded operator. */
#define DECL_OVERLOADED_OPERATOR_P(NODE) \
(IDENTIFIER_OPNAME_P (DECL_NAME (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->operator_code : ERROR_MARK)
/* Nonzero if NODE is an assignment operator (including += and such). */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->assignment_operator_p)
/* For FUNCTION_DECLs: nonzero means that this function is a
constructor or a destructor with an extra in-charge parameter to
control whether or not virtual bases are constructed. */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
/* Nonzero if DECL is a declaration of __builtin_constant_p. */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
&& DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
/* Nonzero for _DECL means that this decl appears in (or will appear
in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
detecting circularity in case members are multiply defined. In the
case of a VAR_DECL, it is also used to determine how program storage
should be allocated. */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
/* Nonzero for a VAR_DECL means that the variable's initialization (if
any) has been processed. (In general, DECL_INITIALIZED_P is
!DECL_EXTERNAL, but static data members may be initialized even if
not defined.) */
#define DECL_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL iff an explicit initializer was provided
or a non-trivial constructor is called. */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that was initialized with a
constant-expression. */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
(TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
/* Nonzero if the DECL was initialized in the class definition itself,
rather than outside the class. This is used for both static member
VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.initialized_in_class)
/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
Only available for decls with DECL_LANG_SPECIFIC. */
#define DECL_ODR_USED(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.odr_used)
/* Nonzero for DECL means that this decl is just a friend declaration,
and should not be added to the list of members for this class. */
#define DECL_FRIEND_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.friend_attr)
/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
#define DECL_BEFRIENDING_CLASSES(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* Nonzero for FUNCTION_DECL means that this decl is a static
member function. */
#define DECL_STATIC_FUNCTION_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->static_function)
/* Nonzero for FUNCTION_DECL means that this decl is a non-static
member function. */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
(TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
/* Nonzero for FUNCTION_DECL means that this decl is a member function
(static or non-static). */
#define DECL_FUNCTION_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as const X *const. */
#define DECL_CONST_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as volatile X *const. */
#define DECL_VOLATILE_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for a DECL means that this member is a non-static member. */
#define DECL_NONSTATIC_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL)
/* Nonzero for _DECL means that this member object type
is mutable. */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))
/* Nonzero for _DECL means that this constructor or conversion function is
non-converting. */
#define DECL_NONCONVERTING_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->nonconverting)
/* Nonzero for FUNCTION_DECL means that this member function is a pure
virtual function. */
#define DECL_PURE_VIRTUAL_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pure_virtual)
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
again. */
#define DECL_INVALID_OVERRIDER_P(NODE) \
(DECL_LANG_FLAG_4 (NODE))
/* True (in a FUNCTION_DECL) if NODE is a function declared with
an override virt-specifier. */
#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))
/* The thunks associated with NODE, a FUNCTION_DECL. */
#define DECL_THUNKS(NODE) \
(DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set DECL_THUNKS. */
#define SET_DECL_THUNKS(NODE,THUNKS) \
(LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))
/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
is the base it inherits from. */
#define DECL_INHERITED_CTOR_BASE(NODE) \
(DECL_CONSTRUCTOR_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set the inherited base. */
#define SET_DECL_INHERITED_CTOR_BASE(NODE,INH) \
(LANG_DECL_FN_CHECK (NODE)->context = (INH))
/* Nonzero if NODE is a thunk, rather than an ordinary function. */
#define DECL_THUNK_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_LANG_SPECIFIC (NODE) \
&& LANG_DECL_FN_CHECK (NODE)->thunk_p)
/* Set DECL_THUNK_P for node. */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
(LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \
LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))
/* Nonzero if NODE is a this pointer adjusting thunk. */
#define DECL_THIS_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a result pointer adjusting thunk. */
#define DECL_RESULT_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
#define DECL_NON_THUNK_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
/* Nonzero if NODE is `extern "C"'. */
#define DECL_EXTERN_C_P(NODE) \
(DECL_LANGUAGE (NODE) == lang_c)
/* Nonzero if NODE is an `extern "C"' function. */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
(DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
/* True iff DECL is an entity with vague linkage whose definition is
available in this translation unit. */
#define DECL_REPO_AVAILABLE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p)
/* True if DECL is declared 'constexpr'. */
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))
/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
template function. */
#define DECL_PRETTY_FUNCTION_P(NODE) \
(DECL_NAME (NODE) \
&& !strcmp (IDENTIFIER_POINTER (DECL_NAME (NODE)), "__PRETTY_FUNCTION__"))
/* Nonzero if the thread-local variable was declared with __thread
as opposed to thread_local. */
#define DECL_GNU_TLS_P(NODE) \
(TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
/* The _TYPE context in which this _DECL appears. This field holds the
class where a virtual function instance is actually defined. */
#define DECL_CLASS_CONTEXT(NODE) \
(DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
/* For a non-member friend function, the class (if any) in which this
friend was defined. For example, given:
struct S { friend void f (); };
the DECL_FRIEND_CONTEXT for `f' will be `S'. */
#define DECL_FRIEND_CONTEXT(NODE) \
((DECL_DECLARES_FUNCTION_P (NODE) \
&& DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->context \
: NULL_TREE)
/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
(LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))
#define CP_DECL_CONTEXT(NODE) \
(!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
(!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
#define FROB_CONTEXT(NODE) \
((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
/* 1 iff NODE has namespace scope, including the global namespace. */
#define DECL_NAMESPACE_SCOPE_P(NODE) \
(!DECL_TEMPLATE_PARM_P (NODE) \
&& TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
#define TYPE_NAMESPACE_SCOPE_P(NODE) \
(TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
#define NAMESPACE_SCOPE_P(NODE) \
((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
|| (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))
/* 1 iff NODE is a class member. */
#define DECL_CLASS_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
#define TYPE_CLASS_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
/* 1 iff NODE is function-local. */
#define DECL_FUNCTION_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) \
&& TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
#define TYPE_FUNCTION_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
both the primary typeinfo object and the associated NTBS name. */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))
/* 1 iff VAR_DECL node NODE is virtual table or VTT. */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
#define FUNCTION_REF_QUALIFIED(NODE) \
TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
#define FUNCTION_RVALUE_QUALIFIED(NODE) \
TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
/* Returns 1 iff VAR_DECL is a construction virtual table.
DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
before using this macro. */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))
/* 1 iff NODE is function-local, but for types. */
#define LOCAL_CLASS_P(NODE) \
(decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
/* For a NAMESPACE_DECL: the list of using namespace directives
The PURPOSE is the used namespace, the value is the namespace
that is the common ancestor. */
#define DECL_NAMESPACE_USING(NODE) (LANG_DECL_NS_CHECK (NODE)->ns_using)
/* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users
of a namespace, to record the transitive closure of using namespace. */
#define DECL_NAMESPACE_USERS(NODE) (LANG_DECL_NS_CHECK (NODE)->ns_users)
/* In a NAMESPACE_DECL, the list of namespaces which have associated
themselves with this one. */
#define DECL_NAMESPACE_ASSOCIATIONS(NODE) \
DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, points to the original namespace if this is
a namespace alias. */
#define DECL_NAMESPACE_ALIAS(NODE) \
DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE) \
(DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
/* Nonzero if NODE is the std namespace. */
#define DECL_NAMESPACE_STD_P(NODE) \
(TREE_CODE (NODE) == NAMESPACE_DECL \
&& CP_DECL_CONTEXT (NODE) == global_namespace \
&& DECL_NAME (NODE) == std_identifier)
/* In a TREE_LIST concatenating using directives, indicate indirect
directives */
#define TREE_INDIRECT_USING(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in an attribute list, indicates that the attribute
must be applied at instantiation time. */
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
was inherited from a template parameter, not explicitly indicated. */
#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);
/* Nonzero if this is a using decl for a dependent scope. */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
/* The scope named in a using decl. */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))
/* The decls named by a using decl. */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
/* Nonzero if the using decl refers to a dependent type. */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
/* In a VAR_DECL, true if we have a shadowed local variable
in the shadowed var table for this VAR_DECL. */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
(VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)
/* In a VAR_DECL for a variable declared in a for statement,
this is the shadowed (local) variable. */
#define DECL_SHADOWED_FOR_VAR(NODE) \
(DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)
#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
(decl_shadowed_for_var_insert (NODE, VAL))
/* In a FUNCTION_DECL, this is nonzero if this function was defined in
the class definition. We have saved away the text of the function,
but have not yet processed it. */
#define DECL_PENDING_INLINE_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pending_inline_p)
/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
function. */
#define DECL_PENDING_INLINE_INFO(NODE) \
(LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)
/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */
#define TYPE_DECL_ALIAS_P(NODE) \
DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */
#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \
DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a type which is an alias for another type; i.e, a type
which declaration was written 'using name-of-type =
another-type'. */
#define TYPE_ALIAS_P(NODE) \
(TYPE_P (NODE) \
&& TYPE_NAME (NODE) \
&& TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
&& TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))
/* For a class type: if this structure has many fields, we'll sort them
and put them into a TREE_VEC. */
#define CLASSTYPE_SORTED_FIELDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->sorted_fields)
/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
TEMPLATE_DECL, the entity is either a template specialization (if
DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
template itself.
In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
specialization or abstract instance. The TREE_VALUE is the
template arguments used to specialize the template.
Consider:
template <typename T> struct S { friend void f(T) {} };
In this case, S<int>::f is, from the point of view of the compiler,
an instantiation of a template -- but, from the point of view of
the language, each instantiation of S results in a wholly unrelated
global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
will be non-NULL, but DECL_USE_TEMPLATE will be zero. */
#define DECL_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \
->u.min.template_info)
/* For a VAR_DECL, indicates that the variable is actually a
non-static data member of anonymous union that has been promoted to
variable status. */
#define DECL_ANON_UNION_VAR_P(NODE) \
(DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
/* Template information for a RECORD_TYPE or UNION_TYPE. */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info)
/* Template information for an ENUMERAL_TYPE. Although an enumeration may
not be a primary template, it may be declared within the scope of a
primary template and the enumeration constants may depend on
non-type template parameters. */
#define ENUM_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE)))
/* Template information for a template template parameter. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \
->template_info)
/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
BOUND_TEMPLATE_TEMPLATE_PARM type. Note that if NODE is a
specialization of an alias template, this accessor returns the
template info for the alias template, not the one (if any) for the
template of the underlying type. */
/* (The alias arm needs no second DECL_LANG_SPECIFIC test: the guard on
   the first line already guarantees it is non-null, so the old inner
   ternary's NULL_TREE branch was unreachable.)  */
#define TYPE_TEMPLATE_INFO(NODE) \
  ((TYPE_ALIAS_P (NODE) && DECL_LANG_SPECIFIC (TYPE_NAME (NODE))) \
   ? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
   : ((TREE_CODE (NODE) == ENUMERAL_TYPE) \
      ? ENUM_TEMPLATE_INFO (NODE) \
      : ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
	 ? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) \
	 : (CLASS_TYPE_P (NODE) \
	    ? CLASSTYPE_TEMPLATE_INFO (NODE) \
	    : NULL_TREE))))
/* Set the template information for an ENUMERAL_, RECORD_, or
UNION_TYPE to VAL. */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
? (ENUM_TEMPLATE_INFO (NODE) = (VAL)) \
: ((CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
? (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL)) \
: (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL))))
/* The TEMPLATE_DECL of which a TEMPLATE_INFO node records a
   specialization or instantiation; stored in the node's TREE_TYPE
   slot.  */
#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
/* The template argument vector of a TEMPLATE_INFO node; stored in the
   node's TREE_CHAIN slot.  */
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
/* Flag on a TEMPLATE_INFO.  NOTE(review): appears to mark an
   instantiation that is still on the pending-templates list — confirm
   against the template machinery before relying on this.  */
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)
/* For a given TREE_VEC containing a template argument list,
this property contains the number of arguments that are not
defaulted. */
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))
/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
property. */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)
#ifdef ENABLE_CHECKING
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
#else
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
: TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif
/* The list of typedefs - used in the template - that need
access checking at template instantiation time.
FIXME this should be associated with the TEMPLATE_DECL, not the
TEMPLATE_INFO. */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
((struct tree_template_info*)TEMPLATE_INFO_CHECK \
(NODE))->typedefs_needing_access_checking
/* We use TREE_VECs to hold template arguments. If there is only one
level of template arguments, then the TREE_VEC contains the
arguments directly. If there is more than one level of template
arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
containing the template arguments for a single level. The first
entry in the outer TREE_VEC is the outermost level of template
parameters; the last is the innermost.
It is incorrect to ever form a template argument vector containing
only one level of arguments, but which is a TREE_VEC containing as
its only entry the TREE_VEC for that level.
For each TREE_VEC containing the template arguments for a single
level, it's possible to get or set the number of non defaulted
template arguments by using the accessor macros
GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */
/* Nonzero if the template argument list is actually a vector of vectors,
rather than just a vector. */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
(NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
&& TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
/* The depth of a template argument vector. When called directly by
the parser, we use a TREE_LIST rather than a TREE_VEC to represent
template arguments. In fact, we may even see NULL_TREE if there
are no template arguments. In both of those cases, there is only
one level of template arguments. */
#define TMPL_ARGS_DEPTH(NODE) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)
/* The LEVELth level of the template ARGS. The outermost level of
args is level 1, not level 0. */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))
/* Set the LEVELth level of the template ARGS to VAL. This macro does
not work with single-level argument vectors. */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
(TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
(TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
/* Given a single level of template arguments in NODE, return the
number of arguments. */
#define NUM_TMPL_ARGS(NODE) \
(TREE_VEC_LENGTH (NODE))
/* Returns the innermost level of template arguments in ARGS. */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
(get_innermost_template_args ((NODE), 1))
/* The number of levels of template parameters given by NODE. */
#define TMPL_PARMS_DEPTH(NODE) \
((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
/* The TEMPLATE_DECL instantiated or specialized by NODE. This
TEMPLATE_DECL will be the immediate parent, not the most general
template. For example, in:
template <class T> struct S { template <class U> void f(U); }
the FUNCTION_DECL for S<int>::f<double> will have, as its
DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
class, this value will not be a TEMPLATE_DECL, but rather an
IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
any explicit template arguments provided. For example, in:
template <class T> struct S { friend void f<int>(int, double); }
the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
DECL_TI_ARGS will be {int}.
For a FIELD_DECL with a non-static data member initializer, this value
is the FIELD_DECL it was instantiated from. */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
/* The template arguments used to obtain this decl from the most
general form of DECL_TI_TEMPLATE. For the example given for
DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
are always the full set of arguments required to instantiate this
declaration from the most general template specialized here. */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE
will be generated from a partial specialization, the TEMPLATE_DECL
referred to here will be the original template. For example,
given:
template <typename T> struct S {};
template <typename T> struct S<T*> {};
the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>. */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
/* The template arguments recorded in the CLASSTYPE_TEMPLATE_INFO of the
   class type NODE (see TI_ARGS).  */
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
/* For a template instantiation TYPE, returns the TYPE corresponding
to the primary template. Otherwise returns TYPE itself. */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
(CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
: (TYPE))
/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
#define TYPE_TI_TEMPLATE(NODE) \
(TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TI_ARGS(NODE) \
(TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
/* The innermost level of template parameters of a template parameter
   list NODE; stored in the list node's TREE_VALUE slot.  */
#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
sense of [temp.mem]. */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
(DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
/* Nonzero if the NODE corresponds to the template parameters for a
member template, whose inline definition is being processed after
the class definition is complete. */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */
#define DECL_PACK_P(NODE) \
(DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))
/* Determines if NODE is an expansion of one or more parameter packs,
e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_P(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
|| TREE_CODE (NODE) == EXPR_PACK_EXPANSION)
/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_PATTERN(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* The list of parameter packs used in the PACK_EXPANSION_* node. The
TREE_VALUE of each TREE_LIST contains the parameter packs. */
#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
*(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \
? &TREE_OPERAND (NODE, 1) \
: &TYPE_MINVAL (TYPE_PACK_EXPANSION_CHECK (NODE)))
/* Any additional template args to be applied when substituting into
the pattern, set by tsubst_pack_expansion for partial instantiations. */
#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
*(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
? &TYPE_MAXVAL (NODE) \
: &TREE_OPERAND ((NODE), 2))
/* True iff this pack expansion is within a function context. */
#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* True iff this pack expansion is for sizeof.... */
#define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* Determine if this is an argument pack. */
#define ARGUMENT_PACK_P(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
|| TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)
/* The arguments stored in an argument pack. Arguments are stored in a
TREE_VEC, which may have length zero. */
#define ARGUMENT_PACK_ARGS(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Set the arguments stored in an argument pack. VALUE must be a
TREE_VEC. */
#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* Whether the argument pack is "incomplete", meaning that more
arguments can still be deduced. Incomplete argument packs are only
used when the user has provided an explicit template argument list
for a variadic function template. Some of the explicit template
arguments will be placed into the beginning of the argument pack,
but additional arguments might still be deduced. */
#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))
/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
arguments used to fill this pack. */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))
/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
argument will be selected. */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)
/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
select. */
#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)
/* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the
   ARGUMENT_PACK_SELECT represents.  (The stray semicolon previously
   baked into this expansion made the macro unusable as a
   subexpression and silently introduced an empty statement at every
   statement-context use; it has been removed.)  */
#define ARGUMENT_PACK_SELECT_ARG(NODE) \
  TREE_VEC_ELT (ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (NODE)), \
		ARGUMENT_PACK_SELECT_INDEX (NODE))
/* In a FUNCTION_DECL, the saved language-specific per-function data. */
#define DECL_SAVED_FUNCTION_DATA(NODE) \
(LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
->u.saved_language_function)
/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. */
#define REFERENCE_REF_P(NODE) \
(INDIRECT_REF_P (NODE) \
&& TREE_TYPE (TREE_OPERAND (NODE, 0)) \
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \
== REFERENCE_TYPE))
/* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a
reference to VLA type, because it's used for VLA capture. */
#define REFERENCE_VLA_OK(NODE) \
(TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE)))
/* Nonzero if this NEW_EXPR names the global allocation function,
   i.e. was written `::new'.  */
#define NEW_EXPR_USE_GLOBAL(NODE) \
  TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
/* Nonzero if this DELETE_EXPR names the global deallocation function,
   i.e. was written `::delete'.  */
#define DELETE_EXPR_USE_GLOBAL(NODE) \
  TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
/* Nonzero if this DELETE_EXPR is the array form, `delete[]'.  */
#define DELETE_EXPR_USE_VEC(NODE) \
  TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
/* Indicates that this is a non-dependent COMPOUND_EXPR which will
resolve to a function call. */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))
/* In a CALL_EXPR appearing in a template, true if Koenig lookup
should be performed at instantiation time. */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
/* True if CALL_EXPR expresses list-initialization of an object. */
#define CALL_EXPR_LIST_INIT_P(NODE) \
TREE_LANG_FLAG_3 (TREE_CHECK2 ((NODE),CALL_EXPR,AGGR_INIT_EXPR))
/* Indicates whether a string literal has been parenthesized. Such
usages are disallowed in certain circumstances. */
#define PAREN_STRING_LITERAL_P(NODE) \
TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
/* Indicates whether a COMPONENT_REF has been parenthesized, or an
INDIRECT_REF comes from parenthesizing a VAR_DECL. Currently only set
some of the time in C++14 mode. */
#define REF_PARENTHESIZED_P(NODE) \
TREE_LANG_FLAG_2 (TREE_CHECK2 ((NODE), COMPONENT_REF, INDIRECT_REF))
/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
constructor call, rather than an ordinary function call. */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
the object. */
#define AGGR_INIT_ZERO_FIRST(NODE) \
TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))
/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR
accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
CALL_EXPR_STATIC_CHAIN). */
#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)
/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
the argument count is zero when checking is enabled. Instead, do
the pointer arithmetic to advance past the 3 fixed operands in a
AGGR_INIT_EXPR. That produces a valid pointer to just past the end of
the operand array, even if it's not valid to dereference it. */
#define AGGR_INIT_EXPR_ARGP(NODE) \
(&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)
/* Abstract iterators for AGGR_INIT_EXPRs. */
/* Structure containing iterator state. */
typedef struct aggr_init_expr_arg_iterator_d {
  tree t;	/* the AGGR_INIT_EXPR being walked */
  int n;	/* total number of arguments (aggr_init_expr_nargs) */
  int i;	/* index of the next argument to yield */
} aggr_init_expr_arg_iterator;
/* Initialize the abstract argument list iterator object ITER with the
arguments from AGGR_INIT_EXPR node EXP. */
/* Prepare ITER to walk the argument list of the AGGR_INIT_EXPR EXP;
   the first call to next_aggr_init_expr_arg will yield argument 0.  */
inline void
init_aggr_init_expr_arg_iterator (tree exp,
				  aggr_init_expr_arg_iterator *iter)
{
  iter->i = 0;
  iter->n = aggr_init_expr_nargs (exp);
  iter->t = exp;
}
/* Return the next argument from abstract argument list iterator object ITER,
and advance its state. Return NULL_TREE if there are no more arguments. */
/* Yield the next argument from abstract argument list iterator ITER and
   step it forward; yield NULL_TREE once the arguments are exhausted.  */
inline tree
next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
{
  if (iter->i < iter->n)
    return AGGR_INIT_EXPR_ARG (iter->t, iter->i++);
  return NULL_TREE;
}
/* Initialize the abstract argument list iterator object ITER, then advance
past and return the first argument. Useful in for expressions, e.g.
for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
arg = next_aggr_init_expr_arg (&iter)) */
/* Set up ITER over the AGGR_INIT_EXPR EXP and hand back its first
   argument (NULL_TREE if EXP has none).  Convenient in for-loop
   headers; see FOR_EACH_AGGR_INIT_EXPR_ARG.  */
inline tree
first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
{
  return (init_aggr_init_expr_arg_iterator (exp, iter),
	  next_aggr_init_expr_arg (iter));
}
/* Test whether there are more arguments in abstract argument list iterator
ITER, without changing its state. */
/* Report whether iterator ITER still has arguments left to yield,
   without advancing it.  */
inline bool
more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
{
  return iter->n > iter->i;
}
/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */
#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \
for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \
(arg) = next_aggr_init_expr_arg (&(iter)))
/* VEC_INIT_EXPR accessors. */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)
/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
Only set when the current function is constexpr. */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))
/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))
/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
exceptions. NULL_TREE means 'true'. */
#define MUST_NOT_THROW_COND(NODE) \
TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)
/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
TEMPLATE_DECL. This macro determines whether or not a given class
type is really a template type, as opposed to an instantiation or
specialization of one. */
#define CLASSTYPE_IS_TEMPLATE(NODE) \
(CLASSTYPE_TEMPLATE_INFO (NODE) \
&& !CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The name used by the user to name the typename type. Typically,
this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
corresponding TYPE_DECL. However, this may also be a
TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
#define TYPENAME_TYPE_FULLNAME(NODE) \
(TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as an "enum". */
#define TYPENAME_IS_ENUM_P(NODE) \
(TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
"union". */
#define TYPENAME_IS_CLASS_P(NODE) \
(TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE is in the process of being resolved. */
#define TYPENAME_IS_RESOLVING_P(NODE) \
(TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))
/* [class.virtual]
A class that declares or inherits a virtual function is called a
polymorphic class. */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
/* Nonzero if this class has a virtual function table pointer. */
#define TYPE_CONTAINS_VPTR_P(NODE) \
(TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
/* This flag is true of a local VAR_DECL if it was declared in a for
statement, but we are no longer in the scope of the for. */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))
/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
if we already emitted a warning about using it. */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
scope) declared in a local scope. */
#define DECL_LOCAL_FUNCTION_P(NODE) \
DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'break' stmts. */
#define LABEL_DECL_BREAK(NODE) \
DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'continue' stmts. */
#define LABEL_DECL_CONTINUE(NODE) \
DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE))
/* True if NODE was declared with auto in its return type, but it has
started compilation and so the return type might have been changed by
return type deduction; its declared return type should be found in
DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern. */
#define FNDECL_USED_AUTO(NODE) \
TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is a DECL which we know about but which has not
been explicitly declared, such as a built-in function or a friend
declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P
will be set. */
#define DECL_ANTICIPATED(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.anticipated_p)
/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
within a class but has not been declared in the surrounding scope.
The function is invisible except via argument dependent lookup. */
#define DECL_HIDDEN_FRIEND_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)
/* Nonzero if NODE is an artificial FUNCTION_DECL for
#pragma omp declare reduction. */
#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)
/* Nonzero if DECL has been declared threadprivate by
#pragma omp threadprivate. */
#define CP_DECL_THREADPRIVATE_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= delete'. */
#define DECL_DELETED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
#define DECL_DEFAULTED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->defaulted_p)
/* Nonzero if DECL is explicitly defaulted in the class body. */
#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
/* Nonzero if DECL was defaulted outside the class body. */
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) \
&& !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* Returns nonzero if DECL has external linkage, as specified by the
language standard. (This predicate may hold even when the
corresponding entity is not actually given external linkage in the
object file; see decl_linkage for details.) */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
(decl_linkage (DECL) == lk_external)
/* Keep these codes in ascending code order. */
#define INTEGRAL_CODE_P(CODE) \
((CODE) == ENUMERAL_TYPE \
|| (CODE) == BOOLEAN_TYPE \
|| (CODE) == INTEGER_TYPE)
/* [basic.fundamental]
Types bool, char, wchar_t, and the signed and unsigned integer types
are collectively called integral types.
Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
types as well, which is incorrect in C++. Keep these checks in
ascending code order. */
#define CP_INTEGRAL_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| TREE_CODE (TYPE) == INTEGER_TYPE)
/* Returns true if TYPE is an integral or enumeration name. Keep
these checks in ascending code order. */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
/* Returns true if TYPE is an integral or unscoped enumeration type. */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
(UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))
/* True if the class type TYPE is a literal type. */
#define CLASSTYPE_LITERAL_P(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
/* [basic.fundamental]
Integral and floating types are collectively called arithmetic
types.
As a GNU extension, we also accept complex types.
Keep these checks in ascending code order. */
#define ARITHMETIC_TYPE_P(TYPE) \
(CP_INTEGRAL_TYPE_P (TYPE) \
|| TREE_CODE (TYPE) == REAL_TYPE \
|| TREE_CODE (TYPE) == COMPLEX_TYPE)
/* True iff TYPE is cv decltype(nullptr). */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
/* [basic.types]
Arithmetic types, enumeration types, pointer types,
pointer-to-member types, and std::nullptr_t are collectively called
scalar types.
Keep these checks in ascending code order. */
#define SCALAR_TYPE_P(TYPE) \
(TYPE_PTRDATAMEM_P (TYPE) \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE \
|| ARITHMETIC_TYPE_P (TYPE) \
|| TYPE_PTR_P (TYPE) \
|| TYPE_PTRMEMFUNC_P (TYPE) \
|| NULLPTR_TYPE_P (TYPE))
/* Determines whether this type is a C++0x scoped enumeration
type. Scoped enumerations types are introduced via "enum class" or
"enum struct", e.g.,
enum class Color {
Red, Green, Blue
};
Scoped enumeration types are different from normal (unscoped)
enumeration types in several ways:
- The enumerators of a scoped enumeration type are only available
within the scope of the enumeration type and not in the
enclosing scope. For example, the Red color can be referred to
with "Color::Red" but not "Red".
- Scoped enumerators and enumerations do not implicitly convert
to integers or 'bool'.
- The underlying type of the enum is well-defined. */
#define SCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))
/* Determine whether this is an unscoped enumeration type. */
#define UNSCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))
/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
enumeration type (1) or a normal (unscoped) enumeration type
(0). */
#define SET_SCOPED_ENUM_P(TYPE, VAL) \
(ENUM_IS_SCOPED (TYPE) = (VAL))
/* Set the flag recording whether the ENUMERAL_TYPE is opaque — i.e.
   declared without its enumerator list (presumably an opaque-enum
   declaration; confirm against the enum-parsing code).  */
#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
  (ENUM_IS_OPAQUE (TYPE) = (VAL))
/* Nonzero if TYPE is an ENUMERAL_TYPE with the opaque flag set.  */
#define OPAQUE_ENUM_P(TYPE) \
  (TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))
/* Determines whether an ENUMERAL_TYPE has an explicit
underlying type. */
#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
/* Returns the underlying type of the given enumeration type. The
underlying type is determined in different ways, depending on the
properties of the enum:
- In C++0x, the underlying type can be explicitly specified, e.g.,
enum E1 : char { ... } // underlying type is char
- In a C++0x scoped enumeration, the underlying type is int
unless otherwise specified:
enum class E2 { ... } // underlying type is int
- Otherwise, the underlying type is determined based on the
values of the enumerators. In this case, the
ENUM_UNDERLYING_TYPE will not be set until after the definition
of the enumeration is completed by finish_enum. */
#define ENUM_UNDERLYING_TYPE(TYPE) \
TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
/* [dcl.init.aggr]
An aggregate is an array or a class with no user-provided
constructors, no brace-or-equal-initializers for non-static data
members, no private or protected non-static data members, no
base classes, and no virtual functions.
As an extension, we also treat vectors as aggregates. Keep these
checks in ascending code order. */
#define CP_AGGREGATE_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == VECTOR_TYPE \
||TREE_CODE (TYPE) == ARRAY_TYPE \
|| (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
/* Nonzero for a class type means that the class type has a
user-declared constructor. */
#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
/* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a
late-specified return type. */
#define TYPE_HAS_LATE_RETURN_TYPE(NODE) \
(TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE)))
/* When appearing in an INDIRECT_REF, it means that the tree structure
underneath is actually a call to a constructor. This is needed
when the constructor must initialize local storage (which can
be automatically destroyed), rather than allowing it to allocate
space from the heap.
When appearing in a SAVE_EXPR, it means that underneath
is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a
compound literal.
When appearing in a FIELD_DECL, it means that this field
has been duly initialized in its constructor. */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
/* True if NODE is a brace-enclosed initializer. */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
/* True if NODE is a compound-literal, i.e., a brace-enclosed
initializer cast to a particular type. */
#define COMPOUND_LITERAL_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is a CONSTRUCTOR with no elements that is not a
   compound literal, i.e. an empty brace-enclosed initializer "{}".  */
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
&& vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\
&& !TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is an init-list used as a direct-initializer, i.e.
B b{1,2}, not B b({1,2}) or B b = {1,2}. */
#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))
/* True if an uninitialized element in NODE should not be treated as
implicitly value-initialized. Only used in constexpr evaluation. */
#define CONSTRUCTOR_NO_IMPLICIT_ZERO(NODE) \
(TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE)))
/* True if this CONSTRUCTOR should not be used as a variable initializer
because it was loaded from a constexpr variable with mutable fields. */
#define CONSTRUCTOR_MUTABLE_POISON(NODE) \
(TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE)))
#define DIRECT_LIST_INIT_P(NODE) \
(BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE))
/* True if NODE represents a conversion for direct-initialization in a
template. Set by perform_implicit_conversion_flags. */
#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
(TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* Nonzero means that an object of this type cannot be initialized using
an initializer list. */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)
/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)
/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)
/* Nonzero if there is a non-trivial X::X(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)
/* Nonzero if there is no trivial default constructor for this class. */
#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
A destructor is trivial if it is an implicitly declared
destructor and if:
- all of the direct base classes of its class have trivial
destructors,
- for all of the non-static data members of its class that are
of class type (or array thereof), each such class has a
trivial destructor. */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
(!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
/* Nonzero for _TYPE node means that this type does not have a trivial
destructor. Therefore, destroying an object of this type will
involve a call to a destructor. This can apply to objects of
ARRAY_TYPE if the type of the elements needs a destructor. */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
(TYPE_LANG_FLAG_4 (NODE))
/* Nonzero for class type means that the default constructor is trivial. */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
(TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))
/* Nonzero for class type means that copy initialization of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
(TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))
/* Nonzero for class type means that assignment of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
(TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))
/* Returns true if NODE is a pointer-to-data-member. */
#define TYPE_PTRDATAMEM_P(NODE) \
(TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer. */
#define TYPE_PTR_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE)
/* Returns true if NODE is an object type:
[basic.types]
An object type is a (possibly cv-qualified) type that is not a
function type, not a reference type, and not a void type.
Keep these checks in ascending order, for speed. */
#define TYPE_OBJ_P(NODE) \
(TREE_CODE (NODE) != REFERENCE_TYPE \
&& !VOID_TYPE_P (NODE) \
&& TREE_CODE (NODE) != FUNCTION_TYPE \
&& TREE_CODE (NODE) != METHOD_TYPE)
/* Returns true if NODE is a pointer to an object. Keep these checks
in ascending tree code order. */
#define TYPE_PTROB_P(NODE) \
(TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a reference to an object. Keep these checks
in ascending tree code order. */
#define TYPE_REF_OBJ_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a pointer to an object, or a pointer to
void. Keep these checks in ascending tree code order. */
#define TYPE_PTROBV_P(NODE) \
(TYPE_PTR_P (NODE) \
&& !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \
|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))
/* Returns true if NODE is a pointer to function type. */
#define TYPE_PTRFN_P(NODE) \
(TYPE_PTR_P (NODE) \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a reference to function type. */
#define TYPE_REFFN_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a pointer to member function type. */
#define TYPE_PTRMEMFUNC_P(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_PTRMEMFUNC_FLAG (NODE))
#define TYPE_PTRMEMFUNC_FLAG(NODE) \
(TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE)))
/* Returns true if NODE is a pointer-to-member. */
#define TYPE_PTRMEM_P(NODE) \
(TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
/* Returns true if NODE is a pointer or a pointer-to-member. */
#define TYPE_PTR_OR_PTRMEM_P(NODE) \
(TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
/* Indicates when overload resolution may resolve to a pointer to
member function. [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
before using this macro. */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
(cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\
cp_type_quals (NODE)))
/* As above, but can be used in places that want an lvalue at the expense
of not necessarily having the correct cv-qualifiers. */
#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \
(TREE_TYPE (TYPE_FIELDS (NODE)))
/* Returns `A' for a type like `int (A::*)(double)' */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* These are used to manipulate the canonical RECORD_TYPE from the
hashed POINTER_TYPE, and can only be used on the POINTER_TYPE.  */
#define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \
(TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL)
/* Record VALUE as the pointer-to-member-function RECORD_TYPE for NODE,
   allocating NODE's lang_type structure lazily on first use (and marking
   it as the ptrmem variant, not the class variant, via
   is_lang_type_class = 0).  */
#define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) \
do { \
if (TYPE_LANG_SPECIFIC (NODE) == NULL) \
{ \
TYPE_LANG_SPECIFIC (NODE) \
= (struct lang_type *) ggc_internal_cleared_alloc \
(sizeof (struct lang_type_ptrmem)); \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0; \
} \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE); \
} while (0)
/* For a pointer-to-member type of the form `T X::*', this is `X'.
For a type like `void (X::*)() const', this type is `X', not `const
X'. To get at the `const X' you have to look at the
TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
type `const X*'. */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TYPE_OFFSET_BASETYPE (NODE) \
: TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
/* For a pointer-to-member type of the form `T X::*', this is `T'. */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TREE_TYPE (NODE) \
: TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
`X'. */
#define PTRMEM_CST_CLASS(NODE) \
TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
/* For a pointer-to-member constant `X::Y' this is the _DECL for
`Y'. */
#define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
/* The expression in question for a TYPEOF_TYPE. */
#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))
/* The type in question for an UNDERLYING_TYPE. */
#define UNDERLYING_TYPE_TYPE(NODE) \
(TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))
/* The type in question for BASES. */
#define BASES_TYPE(NODE) \
(TYPE_VALUES_RAW (BASES_CHECK (NODE)))
#define BASES_DIRECT(NODE) \
TREE_LANG_FLAG_0 (BASES_CHECK (NODE))
/* The expression in question for a DECLTYPE_TYPE. */
#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
id-expression or a member-access expression. When false, it was
parsed as a full expression. */
#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
(DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag
/* These flags indicate that we want different semantics from normal
decltype: lambda capture just drops references, init capture
uses auto semantics, lambda proxies look through implicit dereference. */
#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_INIT_CAPTURE(NODE) \
TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \
TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_EXTERN(NODE) \
DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_STATIC(NODE) \
DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a lambda capture
field for an array of runtime bound. */
#define DECL_VLA_CAPTURE_P(NODE) \
DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE))
/* Nonzero for PARM_DECL node means that this is an array function
parameter, i.e, a[] rather than *a. */
#define DECL_ARRAY_PARAMETER_P(NODE) \
DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a base class
of the parent object, as opposed to a member field. */
#define DECL_FIELD_IS_BASE(NODE) \
DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a simple (no
explicit initializer) lambda capture field, making it invisible to
name lookup in unevaluated contexts. */
#define DECL_NORMAL_CAPTURE_P(NODE) \
DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))
/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
flag for this because "A union for which objects or pointers are
declared is not an anonymous union" [class.union]. */
#define ANON_AGGR_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
/* Nonzero if TYPE is an anonymous union type. */
#define ANON_UNION_TYPE_P(NODE) \
(TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
/* Define fields and accessors for nodes representing declared names. */
#define TYPE_WAS_ANONYMOUS(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)
/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
/* The format of each node in the DECL_FRIENDLIST is as follows:
The TREE_PURPOSE will be the name of a function, i.e., an
IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose
TREE_VALUEs are friends with the given name. */
#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of
each node is a type; the TREE_VALUE is the access granted for this
DECL in that type. The DECL_ACCESS is set by access declarations.
For example, if a member that would normally be public in a
derived class is made protected, then the derived class and the
protected_access_node will appear in the DECL_ACCESS for the node. */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)
/* Nonzero if the FUNCTION_DECL is a global constructor. */
#define DECL_GLOBAL_CTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_ctor_p)
/* Nonzero if the FUNCTION_DECL is a global destructor. */
#define DECL_GLOBAL_DTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_dtor_p)
/* Accessor macros for C++ template decl nodes. */
/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
is an INT_CST whose TREE_INT_CST_LOW indicates the level of the
template parameters, with 1 being the outermost set of template
parameters. The TREE_VALUE is a vector, whose elements are the
template parameters at each level. Each element in the vector is a
TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
non-type parameter), or a TYPE_DECL (if the parameter is a type
parameter). The TREE_PURPOSE is the default value, if any. The
TEMPLATE_PARM_INDEX for the parameter is available as the
DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
TYPE_DECL).
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to C++ class hierarchy. */
#define DECL_TEMPLATE_PARMS(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
#define DECL_NTPARMS(NODE) \
TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
/* For function, method, class-data templates.
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to C++ class hierarchy. */
#define DECL_TEMPLATE_RESULT(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result
/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
lists all instantiations and specializations of the function so that
tsubst_friend_function can reassign them to another template if we find
that the namespace-scope template is really a partial instantiation of a
friend template.
For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds
all instantiations and specializations of the class type, including
partial instantiations and partial specializations, so that if we
explicitly specialize a partial instantiation we can walk the list
in maybe_process_partial_specialization and reassign them or complain
as appropriate.
In both cases, the TREE_PURPOSE of each node contains the arguments
used; the TREE_VALUE contains the generated variable. The template
arguments are always complete. For example, given:
template <class T> struct S1 {
template <class U> struct S2 {};
template <class U> struct S2<U*> {};
};
the record for the partial specialization will contain, as its
argument list, { {T}, {U*} }, and will be on the
DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
<class U> struct S1<T>::S2'.
This list is not used for other templates. */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \
DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE))
/* For a class template, this list contains the partial
specializations of this template. (Full specializations are not
recorded on this list.) The TREE_PURPOSE holds the arguments used
in the partial specialization (e.g., for `template <class T> struct
S<T*, int>' this will be `T*, int'.) The arguments will also include
any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL
for the partial specialization. The TREE_TYPE is the _TYPE node for
the partial specialization.
This list is not used for other templates. */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \
DECL_SIZE (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a DECL which is actually a template parameter. Keep
these checks in ascending tree code order. */
#define DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) \
&& (TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == PARM_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL))
/* Mark NODE as a template parameter. */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) = 1)
/* Nonzero if NODE is a template template parameter. */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
/* Nonzero for a DECL that represents a function template. */
#define DECL_FUNCTION_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
/* Nonzero for a DECL that represents a class template or alias
template. */
#define DECL_TYPE_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL)
/* Nonzero for a DECL that represents a class template. */
#define DECL_CLASS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a TEMPLATE_DECL that represents an alias template. */
#define DECL_ALIAS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a NODE which declares a type. */
#define DECL_DECLARES_TYPE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE))
/* Nonzero if NODE declares a function. */
#define DECL_DECLARES_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))
/* Nonzero if NODE is the typedef implicitly generated for a type when
the type is declared. In C++, `struct S {};' is roughly
equivalent to `struct S {}; typedef struct S S;' in C.
DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
example. In C++, there is a second implicit typedef for each
class, in the scope of `S' itself, so that you can say `S::S'.
DECL_SELF_REFERENCE_P will hold for that second typedef. */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
(DECL_LANG_FLAG_2 (NODE) = 1)
#define DECL_SELF_REFERENCE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
(DECL_LANG_FLAG_4 (NODE) = 1)
/* A `primary' template is one that has its own template header and is not
a partial specialization. A member function of a class template is a
template, but not primary. A member template is primary. Friend
templates are primary, too. */
/* Returns the primary template corresponding to these parameters. */
#define DECL_PRIMARY_TEMPLATE(NODE) \
(TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
/* Returns nonzero if NODE is a primary template. */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
/* Nonzero iff NODE is a specialization of a template. The value
indicates the type of specializations:
1=implicit instantiation
2=partial or explicit specialization, e.g.:
template <> int min<int> (int, int),
3=explicit instantiation, e.g.:
template int min<int> (int, int);
Note that NODE will be marked as a specialization even if the
template it is instantiating is not a primary template. For
example, given:
template <typename T> struct O {
void f();
struct I {};
};
both O<int>::f and O<int>::I will be marked as instantiations.
If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
be non-NULL. */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)
/* Like DECL_USE_TEMPLATE, but for class types. */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->use_template)
/* True if NODE is a specialization of a primary template. */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
(CLASS_TYPE_P (NODE) \
&& CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) & 1)
#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
/* Returns true for an explicit or partial specialization of a class
template. */
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 2)
#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 1)
#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 3)
/* Nonzero if DECL is a friend function which is an instantiation
from the point of view of the compiler, but not from the point of
view of the language. For example given:
template <class T> struct S { friend void f(T) {}; };
the declaration of `void f(int)' generated when S<int> is
instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INFO (DECL) && !DECL_USE_TEMPLATE (DECL))
/* Nonzero if DECL is a function generated from a function 'temploid',
i.e. template, member of class template, or dependent friend. */
#define DECL_TEMPLOID_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INSTANTIATION (DECL) \
|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL))
/* Nonzero if DECL is either defined implicitly by the compiler or
generated from a temploid. */
#define DECL_GENERATED_P(DECL) \
(DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL))
/* Nonzero iff we are currently processing a declaration for an
entity with its own template parameter list, and which is not a
full specialization. */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
(processing_template_decl > template_class_depth (current_scope ()))
/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
instantiated, i.e. its definition has been generated from the
pattern given in the template. */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
/* We know what we're doing with this decl now. */
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
so that assemble_external will work properly. So we have this flag to
tell us whether the decl is really not external.
This flag does not indicate whether or not the decl is defined in the
current translation unit; it indicates whether or not we should emit the
decl at the end of compilation if it is defined and needed. */
#define DECL_NOT_REALLY_EXTERN(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
#define DECL_REALLY_EXTERN(NODE) \
(DECL_EXTERNAL (NODE) \
&& (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE)))
/* A thunk is a stub function.
A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
The address of the ordinary FUNCTION_DECL is given by the
DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
FUNCTION_DECL. The job of the thunk is to either adjust the this
pointer before transferring control to the FUNCTION_DECL, or call
FUNCTION_DECL and then adjust the result value. Note, the result
pointer adjusting thunk must perform a call to the thunked
function, (or be implemented via passing some invisible parameter
to the thunked function, which is modified to perform the
adjustment just before returning).
A thunk may perform either, or both, of the following operations:
o Adjust the this or result pointer by a constant offset.
o Adjust the this or result pointer by looking up a vcall or vbase offset
in the vtable.
A this pointer adjusting thunk converts from a base to a derived
class, and hence adds the offsets. A result pointer adjusting thunk
converts from a derived class to a base, and hence subtracts the
offsets. If both operations are performed, then the constant
adjustment is performed first for this pointer adjustment and last
for the result pointer adjustment.
The constant adjustment is given by THUNK_FIXED_OFFSET. If the
vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
used. For this pointer adjusting thunks, it is the vcall offset
into the vtable. For result pointer adjusting thunks it is the
binfo of the virtual base to convert to. Use that binfo's vbase
offset.
It is possible to have equivalent covariant thunks. These are
distinct virtual covariant thunks whose vbase offsets happen to
have the same value. THUNK_ALIAS is used to pick one as the
canonical thunk, which will get all the this pointer adjusting
thunks attached to it. */
/* An integer indicating how many bytes should be subtracted from the
this or result pointer when this function is called. */
#define THUNK_FIXED_OFFSET(DECL) \
(DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)
/* A tree indicating how to perform the virtual adjustment. For a this
adjusting thunk it is the number of bytes to be added to the vtable
to find the vcall offset. For a result adjusting thunk, it is the
binfo of the relevant virtual base. If NULL, then there is no
virtual adjust. (The vptr is always located at offset zero from
the this or result pointer.) (If the covariant type is within the
class hierarchy being laid out, the vbase index is not yet known
at the point we need to create the thunks, hence the need to use
binfos.) */
#define THUNK_VIRTUAL_OFFSET(DECL) \
(LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)
/* A thunk which is equivalent to another thunk. */
#define THUNK_ALIAS(DECL) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)
/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is
possible for the target to be a thunk too. */
#define THUNK_TARGET(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* True for a SCOPE_REF iff the "template" keyword was used to
indicate that the qualified name denotes a template. */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
(TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))
/* True for an OMP_ATOMIC that has dependent parameters. These are stored
as an expr in operand 1, and integer_zero_node in operand 0. */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
(TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)
/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
(TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
hold code (or code fragments) related to ctors, dtors, and op=.
See semantics.c for details. */
#define CP_OMP_CLAUSE_INFO(NODE) \
TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
OMP_CLAUSE_LINEAR))
/* Nonzero if this transaction expression's body contains statements. */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
/* These macros provide convenient access to the various _STMT nodes
created when parsing template declarations. */
#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
/* Nonzero if this try block is a function try block. */
#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE))
/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run
and the VAR_DECL for which this cleanup exists. */
#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
/* IF_STMT accessors. These give access to the condition of the if
statement, the then block of the if statement, and the else block
of the if statement if it exists. */
#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
/* WHILE_STMT accessors. These give access to the condition of the
while statement and the body of the while statement, respectively. */
#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
/* DO_STMT accessors. These give access to the condition of the do
statement and the body of the do statement, respectively. */
#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
/* FOR_STMT accessors. These give access to the init statement,
condition, update expression, and body of the for statement,
respectively. */
#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
/* RANGE_FOR_STMT accessors. These give access to the declarator,
expression, body, and scope of the statement, respectively. */
#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))
#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
/* STMT_EXPR accessor. */
#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
/* EXPR_STMT accessor. This gives the expression associated with an
expression statement. */
#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
discard it if it isn't useful. */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR is the result of list-initialization of a
temporary. */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR expresses direct-initialization of an object
to be named later. */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
/* True if EXPR expresses direct-initialization of a TYPE. */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \
(TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \
&& same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))
/* True if this CONVERT_EXPR is for a conversion to virtual base in
an NSDMI, and should be re-evaluated when used in a constructor. */
#define CONVERT_EXPR_VBASE_PATH(NODE) \
TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))
/* True if SIZEOF_EXPR argument is type. */
#define SIZEOF_EXPR_TYPE_P(NODE) \
TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))
/* True if INTEGER_CST is a zero literal seen in function argument list. */
#define LITERAL_ZERO_P(NODE) \
(INTEGER_CST_CHECK (NODE)->base.nothrow_flag)
/* An enumeration of the kind of tags that C++ accepts.  none_type is
   explicitly zero, presumably so "no tag" tests false in boolean
   contexts — confirm against callers. */
enum tag_types {
none_type = 0, /* Not a tag type. */
record_type, /* "struct" types. */
class_type, /* "class" types. */
union_type, /* "union" types. */
enum_type, /* "enum" types. */
typename_type /* "typename" types. */
};
/* The various kinds of lvalues we distinguish.  The values are
   distinct powers of two so they can be OR'd together. */
enum cp_lvalue_kind_flags {
clk_none = 0, /* Things that are not an lvalue. */
clk_ordinary = 1, /* An ordinary lvalue. */
clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */
clk_class = 4, /* A prvalue of class-type. */
clk_bitfield = 8, /* An lvalue for a bit-field. */
clk_packed = 16 /* An lvalue for a packed field. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum cp_lvalue_kind_flags. */
typedef int cp_lvalue_kind;
/* Various kinds of template specialization, instantiation, etc.  The
   tsk_invalid_* and tsk_*_parms enumerators describe ill-formed
   declarations that are diagnosed rather than accepted. */
typedef enum tmpl_spec_kind {
tsk_none, /* Not a template at all. */
tsk_invalid_member_spec, /* An explicit member template
specialization, but the enclosing
classes have not all been explicitly
specialized. */
tsk_invalid_expl_inst, /* An explicit instantiation containing
template parameter lists. */
tsk_excessive_parms, /* A template declaration with too many
template parameter lists. */
tsk_insufficient_parms, /* A template declaration with too few
parameter lists. */
tsk_template, /* A template declaration. */
tsk_expl_spec, /* An explicit specialization. */
tsk_expl_inst /* An explicit instantiation. */
} tmpl_spec_kind;
/* The various kinds of access. BINFO_ACCESS depends on these being
two bit quantities. The numerical values are important; they are
used to initialize RTTI data structures, so changing them changes
the ABI.  Do not reorder or renumber these enumerators. */
typedef enum access_kind {
ak_none = 0, /* Inaccessible. */
ak_public = 1, /* Accessible, as a `public' thing. */
ak_protected = 2, /* Accessible, as a `protected' thing. */
ak_private = 3 /* Accessible, as a `private' thing. */
} access_kind;
/* The various kinds of special functions. If you add to this list,
you should update special_function_p as well. */
typedef enum special_function_kind {
sfk_none = 0, /* Not a special function. This enumerator
must have value zero; see
special_function_p. */
sfk_constructor, /* A constructor. */
sfk_copy_constructor, /* A copy constructor. */
sfk_move_constructor, /* A move constructor. */
sfk_copy_assignment, /* A copy assignment operator. */
sfk_move_assignment, /* A move assignment operator. */
sfk_destructor, /* A destructor. */
sfk_complete_destructor, /* A destructor for complete objects. */
sfk_base_destructor, /* A destructor for base subobjects. */
sfk_deleting_destructor, /* A destructor for complete objects that
deletes the object after it has been
destroyed. */
sfk_conversion, /* A conversion operator. */
sfk_inheriting_constructor /* An inheriting constructor. */
} special_function_kind;
/* The various kinds of linkage. From [basic.link],
A name is said to have linkage when it might denote the same
object, reference, function, type, template, namespace or value
as a name introduced in another scope:
-- When a name has external linkage, the entity it denotes can
be referred to from scopes of other translation units or from
other scopes of the same translation unit.
-- When a name has internal linkage, the entity it denotes can
be referred to by names from other scopes in the same
translation unit.
-- When a name has no linkage, the entity it denotes cannot be
referred to by names from other scopes. */
typedef enum linkage_kind {
lk_none, /* No linkage. */
lk_internal, /* Internal linkage. */
lk_external /* External linkage. */
} linkage_kind;
/* The storage duration of an entity, following [basic.stc]. */
typedef enum duration_kind {
dk_static, /* Static storage duration. */
dk_thread, /* Thread storage duration. */
dk_auto, /* Automatic storage duration. */
dk_dynamic /* Dynamic storage duration. */
} duration_kind;
/* Bitmask flags to control type substitution.  The values are distinct
   bits and may be OR'd together; tf_warning_or_error is the usual
   "diagnose everything" combination. */
enum tsubst_flags {
tf_none = 0, /* nothing special */
tf_error = 1 << 0, /* give error messages */
tf_warning = 1 << 1, /* give warnings too */
tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
tf_keep_type_decl = 1 << 3, /* retain typedef type decls
(make_typename_type use) */
tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
instantiate_type use) */
tf_user = 1 << 5, /* found template must be a user template
(lookup_template_class use) */
tf_conv = 1 << 6, /* We are determining what kind of
conversion might be permissible,
not actually performing the
conversion. */
tf_decltype = 1 << 7, /* We are the operand of decltype.
Used to implement the special rules
for calls in decltype (5.2.2/11). */
tf_partial = 1 << 8, /* Doing initial explicit argument
substitution in fn_type_unification. */
/* Convenient substitution flags combinations. */
tf_warning_or_error = tf_warning | tf_error
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum tsubst_flags. */
typedef int tsubst_flags_t;
/* The kind of checking we can do looking in a class hierarchy.
   Distinct bits (plus the ba_check combination); combine with
   bitwise-or. */
enum base_access_flags {
ba_any = 0, /* Do not check access, allow an ambiguous base,
prefer a non-virtual base */
ba_unique = 1 << 0, /* Must be a unique base. */
ba_check_bit = 1 << 1, /* Check access. */
ba_check = ba_unique | ba_check_bit,
ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum base_access_flags. */
typedef int base_access;
/* The various kinds of access check during parsing: perform the check
   immediately, record it for later checking, or skip it entirely. */
typedef enum deferring_kind {
dk_no_deferred = 0, /* Check access immediately */
dk_deferred = 1, /* Deferred check */
dk_no_check = 2 /* No access check */
} deferring_kind;
/* The kind of base we can find, looking in a class hierarchy.
Values <0 indicate we failed; bk_same_type is zero so that the
genuine proper-base results are the positive values. */
typedef enum base_kind {
bk_inaccessible = -3, /* The base is inaccessible */
bk_ambig = -2, /* The base is ambiguous */
bk_not_base = -1, /* It is not a base */
bk_same_type = 0, /* It is the same type */
bk_proper_base = 1, /* It is a proper base */
bk_via_virtual = 2 /* It is a proper base, but via a virtual
path. This might not be the canonical
binfo. */
} base_kind;
/* Node for "pointer to (virtual) function".
This may be distinct from ptr_type_node so gdb can distinguish them. */
#define vfunc_ptr_type_node vtable_entry_type
/* For building calls to `delete'. */
extern GTY(()) tree integer_two_node;
/* The number of function bodies which we are currently processing.
(Zero if we are at namespace scope, one inside the body of a
function, two inside the body of a function in a local class, etc.) */
extern int function_depth;
/* Nonzero if we are inside eq_specializations, which affects comparison of
PARM_DECLs in cp_tree_equal. */
extern int comparing_specializations;
/* In parser.c. */
/* Nonzero if we are parsing an unevaluated operand: an operand to
sizeof, typeof, or alignof. This is a count since operands to
sizeof can be nested. */
extern int cp_unevaluated_operand;
/* in pt.c */
/* These values are used for the `STRICT' parameter to type_unification and
fn_type_unification. Their meanings are described with the
documentation for fn_type_unification. */
typedef enum unification_kind_t {
DEDUCE_CALL, /* Deduction from the arguments of a function call. */
DEDUCE_CONV, /* Deduction for a conversion function. */
DEDUCE_EXACT /* Deduction requiring an exact match. */
} unification_kind_t;
/* in class.c */
extern int current_class_depth;
/* An array of all local classes present in this translation unit, in
declaration order. */
extern GTY(()) vec<tree, va_gc> *local_classes;
/* Here's where we control how name mangling takes place. */
/* Cannot use '$' up front, because this confuses gdb
(names beginning with '$' are gdb-local identifiers).
Note that all forms in which the '$' is significant are long enough
for direct indexing (meaning that if we know there is a '$'
at a particular location, we can index into the string at
any other location that provides distinguishing characters). */
/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
doesn't allow '.' in symbol names. */
#ifndef NO_DOT_IN_LABEL
#define JOINER '.'
#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#else /* NO_DOT_IN_LABEL */
#ifndef NO_DOLLAR_IN_LABEL
#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#else /* NO_DOLLAR_IN_LABEL */
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */
#define THIS_NAME "this"
#define IN_CHARGE_NAME "__in_chrg"
#define VTBL_PTR_TYPE "__vtbl_ptr_type"
#define VTABLE_DELTA_NAME "__delta"
#define VTABLE_PFN_NAME "__pfn"
#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
#define UDLIT_OP_ANSI_PREFIX "operator\"\""
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
#define UDLIT_OPER_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), \
UDLIT_OP_ANSI_PREFIX, \
sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
#define UDLIT_OP_SUFFIX(ID_NODE) \
(IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)
#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
&& IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
&& IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
/* Nonzero if we're done parsing and into end-of-file activities. */
extern int at_eof;
/* True if note_mangling_alias should enqueue mangling aliases for
later generation, rather than emitting them right away. */
extern bool defer_mangling_aliases;
/* A list of namespace-scope objects which have constructors or
destructors which reside in the global scope. The decl is stored
in the TREE_VALUE slot and the initializer is stored in the
TREE_PURPOSE slot. */
extern GTY(()) tree static_aggregates;
/* Likewise, for thread local storage. */
extern GTY(()) tree tls_aggregates;
enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };
/* These are used as bits in flags passed to various functions to
control their behavior. Despite the LOOKUP_ prefix, many of these
do not control name lookup. ??? Functions using these flags should
probably be modified to accept explicit boolean flags for the
behaviors relevant to them. */
/* Check for access violations. */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
should be called directly. */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried. This flag
indicates that we are not performing direct-initialization. */
#define LOOKUP_ONLYCONVERTING (1 << 2)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
as long as the current variable bindings; otherwise it only lives
until the end of the complete-expression. It also forces
direct-initialization in cases where other parts of the compiler
have already generated a temporary, such as reference
initialization and the catch parameter. */
#define DIRECT_BIND (1 << 3)
/* We're performing a user-defined conversion, so more user-defined
conversions are not permitted (only built-in conversions). */
#define LOOKUP_NO_CONVERSION (1 << 4)
/* The user has explicitly called a destructor. (Therefore, we do
not need to check that the object is non-NULL before calling the
destructor.) */
#define LOOKUP_DESTRUCTOR (1 << 5)
/* Do not permit references to bind to temporaries. */
#define LOOKUP_NO_TEMP_BIND (1 << 6)
/* Do not accept objects, and possibly namespaces. */
#define LOOKUP_PREFER_TYPES (1 << 7)
/* Do not accept objects, and possibly types. */
#define LOOKUP_PREFER_NAMESPACES (1 << 8)
/* Accept types or namespaces. */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
(Normally, these entities are registered in the symbol table, but
not found by lookup.) */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* Prefer that the lvalue be treated as an rvalue. */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed. */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* We're looking up a constructor for list-initialization. */
#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor. */
#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
/* We only want to consider list constructors. */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
Used by synthesized_method_walk to determine which functions will
be called to initialize subobjects, in order to determine exception
specification and possible implicit delete.
This is kind of a hack, but exiting early avoids problems with trying
to perform argument conversions when the class isn't complete yet. */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
cycles trying to declare them (core issue 1092). */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
digest_init. */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
/* An instantiation with explicit template arguments. */
#define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1)
/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */
#define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1)
/* Used by case_conversion to disregard non-integral conversions. */
#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1)
/* Used for delegating constructors in order to diagnose self-delegation. */
#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1)
#define LOOKUP_NAMESPACES_ONLY(F) \
(((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F) \
(!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH)
/* These flags are used by the conversion code.
CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
CONV_STATIC : Perform the explicit conversions for static_cast.
CONV_CONST : Perform the explicit conversions for const_cast.
CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
CONV_PRIVATE : Perform upcasts to private bases.
CONV_FORCE_TEMP : Require a new temporary when converting to the same
aggregate type. */
#define CONV_IMPLICIT 1
#define CONV_STATIC 2
#define CONV_CONST 4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE 16
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP 64
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET)
#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
/* Used by build_expr_type_conversion to indicate which types are
acceptable as arguments to the expression under consideration. */
#define WANT_INT 1 /* integer types, including bool */
#define WANT_FLOAT 2 /* floating point types */
#define WANT_ENUM 4 /* enumerated types */
#define WANT_POINTER 8 /* pointer types */
#define WANT_NULL 16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
/* Used with comptypes, and related functions, to guide type
comparison. */
#define COMPARE_STRICT 0 /* Just check if the types are the
same. */
#define COMPARE_BASE 1 /* Check to see if the second type is
derived from the first. */
#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
reverse. */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
another declaration of an existing
entity is seen. */
#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be
structural. The actual comparison
will be identical to
COMPARE_STRICT. */
/* Used with push_overloaded_decl. */
#define PUSH_GLOBAL 0 /* Push the DECL into namespace scope,
regardless of the current scope. */
#define PUSH_LOCAL 1 /* Push the DECL into the current
scope. */
#define PUSH_USING 2 /* We are pushing this DECL as the
result of a using declaration. */
/* Used with start function. */
#define SF_DEFAULT 0 /* No flags. */
#define SF_PRE_PARSED 1 /* The function declaration has
already been parsed. */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
in the class body. */
/* Used with start_decl's initialized parameter. */
#define SD_UNINITIALIZED 0
#define SD_INITIALIZED 1
#define SD_DEFAULTED 2
#define SD_DELETED 3
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
class derived from the type pointed to (referred to) by TYPE1. */
#define same_or_base_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
/* These macros are used to access a TEMPLATE_PARM_INDEX. */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))
/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
(TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \
TEMPLATE_TEMPLATE_PARM, \
BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
(TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
(TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
(TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
(TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
(TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))
/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto). */
#define AUTO_IS_DECLTYPE(NODE) \
(TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* These constants can used as bit flags in the process of tree formatting.
TFF_PLAIN_IDENTIFIER: unqualified part of a name.
TFF_SCOPE: include the class and namespace scope of the name.
TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
TFF_DECL_SPECIFIERS: print decl-specifiers.
TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
a class-key (resp. `enum').
TFF_RETURN_TYPE: include function return type.
TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
TFF_EXCEPTION_SPECIFICATION: show function exception specification.
TFF_TEMPLATE_HEADER: show the template<...> header in a
template-declaration.
TFF_TEMPLATE_NAME: show only template-name.
TFF_EXPR_IN_PARENS: parenthesize expressions.
TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
top-level entity.
TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
identical to their defaults.
TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
arguments for a function template specialization.
TFF_POINTER: we are printing a pointer type. */
#define TFF_PLAIN_IDENTIFIER (0)
#define TFF_SCOPE (1)
#define TFF_CHASE_TYPEDEF (1 << 1)
#define TFF_DECL_SPECIFIERS (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
#define TFF_RETURN_TYPE (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
#define TFF_TEMPLATE_HEADER (1 << 7)
#define TFF_TEMPLATE_NAME (1 << 8)
#define TFF_EXPR_IN_PARENS (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
#define TFF_UNQUALIFIED_NAME (1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS (1 << 13)
#define TFF_POINTER (1 << 14)
/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
node. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
? TYPE_TI_TEMPLATE (NODE) \
: TYPE_NAME (NODE))
/* in lex.c */
extern void init_reswords (void);
/* Information about one overloadable operator: its identifier node,
   source-level name, mangled name, and arity.  Used as the element
   type of the operator_name_info and assignment_operator_name_info
   arrays, which are indexed by tree code. */
typedef struct GTY(()) operator_name_info_t {
/* The IDENTIFIER_NODE for the operator. */
tree identifier;
/* The name of the operator. */
const char *name;
/* The mangled name of the operator. */
const char *mangled_name;
/* The arity of the operator. */
int arity;
} operator_name_info_t;
/* A mapping from tree codes to operator name information. */
extern GTY(()) operator_name_info_t operator_name_info
[(int) MAX_TREE_CODES];
/* Similar, but for assignment operators. */
extern GTY(()) operator_name_info_t assignment_operator_name_info
[(int) MAX_TREE_CODES];
/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
constants. */
typedef int cp_cv_quals;
/* Non-static member functions have an optional virt-specifier-seq.
There is a VIRT_SPEC value for each virt-specifier.
They can be combined by bitwise-or to form the complete set of
virt-specifiers for a member function. */
enum virt_specifier
{
VIRT_SPEC_UNSPECIFIED = 0x0,
VIRT_SPEC_FINAL = 0x1,
VIRT_SPEC_OVERRIDE = 0x2
};
/* A type-qualifier, or bitmask therefore, using the VIRT_SPEC
constants. */
typedef int cp_virt_specifiers;
/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:
[dcl.fct]
The return type, the parameter-type-list, the ref-qualifier, and
the cv-qualifier-seq, but not the default arguments or the exception
specification, are part of the function type.
REF_QUAL_NONE Ordinary member function with no ref-qualifier
REF_QUAL_LVALUE Member function with the &-ref-qualifier
REF_QUAL_RVALUE Member function with the &&-ref-qualifier */
enum cp_ref_qualifier {
REF_QUAL_NONE = 0, /* Explicitly zero — presumably so zero-initialized
data means "no ref-qualifier"; confirm. */
REF_QUAL_LVALUE = 1,
REF_QUAL_RVALUE = 2
};
/* A storage class.  At most one storage class may be validly given in
   a declaration; see conflicting_specifiers_p in
   cp_decl_specifier_seq. */
typedef enum cp_storage_class {
/* sc_none must be zero so that zeroing a cp_decl_specifier_seq
sets the storage_class field to sc_none. */
sc_none = 0,
sc_auto,
sc_register,
sc_static,
sc_extern,
sc_mutable
} cp_storage_class;
/* An individual decl-specifier. This is used to index the array of
locations for the declspecs in struct cp_decl_specifier_seq
below.  ds_first aliases ds_signed; new enumerators must be added
before ds_last, which must remain the last one (it sizes the
locations array). */
typedef enum cp_decl_spec {
ds_first,
ds_signed = ds_first,
ds_unsigned,
ds_short,
ds_long,
ds_const,
ds_volatile,
ds_restrict,
ds_inline,
ds_virtual,
ds_explicit,
ds_friend,
ds_typedef,
ds_alias,
ds_constexpr,
ds_complex,
ds_thread,
ds_type_spec,
ds_redefined_builtin_type_spec,
ds_attribute,
ds_std_attribute,
ds_storage_class,
ds_long_long,
ds_last /* This enumerator must always be the last one. */
} cp_decl_spec;
/* A decl-specifier-seq. */
typedef struct cp_decl_specifier_seq {
/* An array of locations for the declaration specifiers, indexed by
enum cp_decl_spec. */
source_location locations[ds_last];
/* The primary type, if any, given by the decl-specifier-seq.
Modifiers, like "short", "const", and "unsigned" are not
reflected here. This field will be a TYPE, unless a typedef-name
was used, in which case it will be a TYPE_DECL. */
tree type;
/* The attributes, if any, provided with the specifier sequence. */
tree attributes;
/* The C++11 attributes that follow the type specifier. */
tree std_attributes;
/* If non-NULL, a built-in type that the user attempted to redefine
to some other type. */
tree redefined_builtin_type;
/* The storage class specified -- or sc_none if no storage class was
explicitly specified. */
cp_storage_class storage_class;
/* For the __intN declspec, this stores the index into the int_n_* arrays. */
int int_n_idx;
/* True iff TYPE_SPEC defines a class or enum. */
BOOL_BITFIELD type_definition_p : 1;
/* True iff multiple types were (erroneously) specified for this
decl-specifier-seq. */
BOOL_BITFIELD multiple_types_p : 1;
/* True iff multiple storage classes were (erroneously) specified
for this decl-specifier-seq or a combination of a storage class
with a typedef specifier. */
BOOL_BITFIELD conflicting_specifiers_p : 1;
/* True iff at least one decl-specifier was found. */
BOOL_BITFIELD any_specifiers_p : 1;
/* True iff at least one type-specifier was found. */
BOOL_BITFIELD any_type_specifiers_p : 1;
/* True iff "int" was explicitly provided. */
BOOL_BITFIELD explicit_int_p : 1;
/* True iff "__intN" was explicitly provided. */
BOOL_BITFIELD explicit_intN_p : 1;
/* True iff "char" was explicitly provided. */
BOOL_BITFIELD explicit_char_p : 1;
/* True iff ds_thread is set for __thread, not thread_local. */
BOOL_BITFIELD gnu_thread_keyword_p : 1;
/* True iff the type is a decltype. */
BOOL_BITFIELD decltype_p : 1;
} cp_decl_specifier_seq;
/* The various kinds of declarators.  Selects the active member of the
   union in struct cp_declarator. */
typedef enum cp_declarator_kind {
cdk_id, /* An identifier. */
cdk_function, /* A function declarator. */
cdk_array, /* An array declarator. */
cdk_pointer, /* A pointer declarator. */
cdk_reference, /* A reference declarator. */
cdk_ptrmem, /* A pointer-to-member declarator. */
cdk_error /* An erroneous declarator. */
} cp_declarator_kind;
/* A declarator. */
typedef struct cp_declarator cp_declarator;
typedef struct cp_parameter_declarator cp_parameter_declarator;
/* A parameter, before it has been semantically analyzed. */
struct cp_parameter_declarator {
/* The next parameter, or NULL if none. */
cp_parameter_declarator *next;
/* The decl-specifiers-seq for the parameter. */
cp_decl_specifier_seq decl_specifiers;
/* The declarator for the parameter. */
cp_declarator *declarator;
/* The default-argument expression, or NULL_TREE, if none. */
tree default_argument;
/* True iff this is the first parameter in the list and the
parameter sequence ends with an ellipsis. */
bool ellipsis_p;
};
/* A declarator.  KIND selects the active member of the union U. */
struct cp_declarator {
/* The kind of declarator. */
ENUM_BITFIELD (cp_declarator_kind) kind : 4;
/* Whether we parsed an ellipsis (`...') just before the declarator,
to indicate this is a parameter pack. */
BOOL_BITFIELD parameter_pack_p : 1;
location_t id_loc; /* Currently only set for cdk_id and cdk_function. */
/* GNU Attributes that apply to this declarator. If the declarator
is a pointer or a reference, these attributes apply to the type
pointed to. */
tree attributes;
/* Standard C++11 attributes that apply to this declarator. If the
declarator is a pointer or a reference, these attributes apply
to the pointer, rather than to the type pointed to. */
tree std_attributes;
/* For all but cdk_id and cdk_error, the contained declarator. For
cdk_id and cdk_error, guaranteed to be NULL. */
cp_declarator *declarator;
union {
/* For identifiers. */
struct {
/* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
*_TYPE) for this identifier. */
tree qualifying_scope;
/* The unqualified name of the entity -- an IDENTIFIER_NODE,
BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */
tree unqualified_name;
/* If this is the name of a function, what kind of special
function (if any). */
special_function_kind sfk;
} id;
/* For functions. */
struct {
/* The parameters to the function as a TREE_LIST of decl/default. */
tree parameters;
/* The cv-qualifiers for the function. */
cp_cv_quals qualifiers;
/* The virt-specifiers for the function. */
cp_virt_specifiers virt_specifiers;
/* The ref-qualifier for the function. */
cp_ref_qualifier ref_qualifier;
/* The exception-specification for the function. */
tree exception_specification;
/* The late-specified return type, if any. */
tree late_return_type;
} function;
/* For arrays. */
struct {
/* The bounds to the array. */
tree bounds;
} array;
/* For cdk_pointer and cdk_ptrmem. */
struct {
/* The cv-qualifiers for the pointer. */
cp_cv_quals qualifiers;
/* For cdk_ptrmem, the class type containing the member. */
tree class_type;
} pointer;
/* For cdk_reference. */
struct {
/* The cv-qualifiers for the reference. These qualifiers are
only used to diagnose ill-formed code. */
cp_cv_quals qualifiers;
/* Whether this is an rvalue reference. */
bool rvalue_ref;
} reference;
} u;
};
/* A level of template instantiation.  Levels are chained through NEXT,
   deepest instantiation first (see the GTY chain_next annotation). */
struct GTY((chain_next ("%h.next"))) tinst_level {
/* The immediately deeper level in the chain. */
struct tinst_level *next;
/* The original node. Can be either a DECL (for a function or static
data member) or a TYPE (for a class), depending on what we were
asked to instantiate. */
tree decl;
/* The location where the template is instantiated. */
location_t locus;
/* errorcount+sorrycount when we pushed this level. */
int errors;
/* True if the location is in a system header. */
bool in_system_header_p;
};
bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);
/* Return the type of the `this' parameter of the METHOD_TYPE FNTYPE,
   i.e. the type of its first argument. */
inline tree
type_of_this_parm (const_tree fntype)
{
  gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
  function_args_iterator it;
  function_args_iter_init (&it, fntype);
  return function_args_iter_cond (&it);
}
/* Return the class of the `this' parameter of the METHOD_TYPE FNTYPE:
   the type that `this' points at. */
inline tree
class_of_this_parm (const_tree fntype)
{
  tree this_type = type_of_this_parm (fntype);
  return TREE_TYPE (this_type);
}
/* True if T designates a variable template declaration: a primary
   TEMPLATE_DECL whose result is a variable. */
inline bool
variable_template_p (tree t)
{
  if (TREE_CODE (t) != TEMPLATE_DECL || !PRIMARY_TEMPLATE_P (t))
    return false;
  tree result = DECL_TEMPLATE_RESULT (t);
  return result && VAR_P (result);
}
/* A parameter list for a function with no parameters,
e.g. "int f(void)". */
extern cp_parameter_declarator *no_parameters;
/* True if we saw "#pragma GCC java_exceptions". */
extern bool pragma_java_exceptions;
/* in call.c */
extern bool check_dtor_name (tree, tree);
bool magic_varargs_p (tree);
extern tree build_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_addr_func (tree, tsubst_flags_t);
extern void set_flags_from_callee (tree);
extern tree build_call_a (tree, int, tree*);
extern tree build_call_n (tree, int, ...);
extern bool null_ptr_cst_p (tree);
extern bool null_member_pointer_value_p (tree);
extern bool sufficient_parms_p (const_tree);
extern tree type_decays_to (tree);
extern tree build_user_type_conversion (tree, tree, int,
tsubst_flags_t);
extern tree build_new_function_call (tree, vec<tree, va_gc> **, bool,
tsubst_flags_t);
extern tree build_operator_new_call (tree, vec<tree, va_gc> **, tree *,
tree *, tree, tree *,
tsubst_flags_t);
extern tree build_new_method_call (tree, tree, vec<tree, va_gc> **,
tree, int, tree *,
tsubst_flags_t);
extern tree build_special_member_call (tree, tree, vec<tree, va_gc> **,
tree, int, tsubst_flags_t);
extern tree build_new_op (location_t, enum tree_code,
int, tree, tree, tree, tree *,
tsubst_flags_t);
extern tree build_op_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool non_placement_deallocation_fn_p (tree);
extern tree build_op_delete_call (enum tree_code, tree, tree,
bool, tree, tree,
tsubst_flags_t);
extern bool can_convert (tree, tree, tsubst_flags_t);
extern bool can_convert_standard (tree, tree, tsubst_flags_t);
extern bool can_convert_arg (tree, tree, tree, int,
tsubst_flags_t);
extern bool can_convert_arg_bad (tree, tree, tree, int,
tsubst_flags_t);
extern bool enforce_access (tree, tree, tree,
tsubst_flags_t);
extern void push_defarg_context (tree);
extern void pop_defarg_context (void);
extern tree convert_default_arg (tree, tree, tree, int,
tsubst_flags_t);
extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
extern tree build_x_va_arg (source_location, tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int,
tsubst_flags_t);
extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern bool type_has_extended_temps (tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
extern tree build_integral_nontype_arg_conv (tree, tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool,
tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, int, tree *,
tsubst_flags_t);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
#ifdef ENABLE_CHECKING
extern void validate_conversion_obstack (void);
#endif /* ENABLE_CHECKING */
extern void mark_versions_used (tree);
extern tree get_function_version_dispatcher (tree);
/* in class.c */
extern tree build_vfield_ref (tree, tree);
extern tree build_base_path (enum tree_code, tree,
tree, int, tsubst_flags_t);
extern tree convert_to_base (tree, tree, bool, bool,
tsubst_flags_t);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern void resort_type_method_vec (void *, void *,
gt_pointer_operator, void *);
extern bool add_method (tree, tree, tree);
extern bool currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree outermost_open_class (void);
extern tree current_nonlambda_class_type (void);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int *);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern bool is_really_empty_class (tree);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void print_class_statistics (void);
extern void build_self_reference (void);
extern int same_signature_p (const_tree, const_tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p (tree);
extern bool type_has_user_provided_constructor (tree);
extern bool type_has_non_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part (tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_virtual_destructor (tree);
extern bool type_has_move_constructor (tree);
extern bool type_has_move_assign (tree);
extern bool type_has_user_declared_move_constructor (tree);
extern bool type_has_user_declared_move_assign(tree);
extern bool type_build_ctor_call (tree);
extern bool type_build_dtor_call (tree);
extern void explain_non_literal_class (tree);
extern void inherit_targ_abi_tags (tree);
extern void defaulted_late_check (tree);
extern bool defaultable_fn_check (tree);
extern void check_abi_tags (tree);
extern void fixup_type_variants (tree);
extern void fixup_attribute_variants (tree);
extern tree* decl_cloned_function_p (const_tree, bool);
extern void clone_function_decl (tree, int);
extern void adjust_clone_args (tree);
extern void deduce_noexcept_on_destructor (tree);
extern void insert_late_enum_def_into_classtype_sorted_fields (tree, tree);
extern bool uniquely_derived_from_p (tree, tree);
extern bool publicly_uniquely_derived_p (tree, tree);
extern tree common_enclosing_class (tree, tree);
/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree,
tsubst_flags_t);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree, tsubst_flags_t);
extern tree ocp_convert (tree, tree, int, int,
tsubst_flags_t);
extern tree cp_convert (tree, tree, tsubst_flags_t);
extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
extern tree cp_fold_convert (tree, tree);
extern tree convert_to_void (tree, impl_conv_void,
tsubst_flags_t);
extern tree convert_force (tree, tree, int,
tsubst_flags_t);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern tree perform_qualification_conversions (tree, tree);
/* in name-lookup.c */
extern tree pushdecl (tree);
extern tree pushdecl_maybe_friend (tree, bool);
extern void maybe_push_cleanup_level (tree);
extern tree pushtag (tree, tree, tag_scope);
extern tree make_anon_name (void);
extern tree pushdecl_top_level_maybe_friend (tree, bool);
extern tree pushdecl_top_level_and_finish (tree, tree);
extern tree check_for_out_of_scope_variable (tree);
extern void dump (cp_binding_level &ref);
extern void dump (cp_binding_level *ptr);
extern void print_other_binding_stack (cp_binding_level *);
extern tree maybe_push_decl (tree);
extern tree current_decl_namespace (void);
/* decl.c */
extern tree poplevel (int, int, int);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure
(union lang_tree_node *);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern tree make_lambda_name (void);
extern int decls_match (tree, tree);
extern tree duplicate_decls (tree, tree, bool);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree build_library_fn_ptr (const char *, tree, int);
extern tree build_cp_library_fn_ptr (const char *, tree, int);
extern tree push_library_fn (tree, tree, tree, int);
extern tree push_void_library_fn (tree, tree, int);
extern tree push_throw_library_fn (tree, tree);
extern void warn_misplaced_attr_for_class_type (source_location location,
tree class_type);
extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern bool check_array_initializer (tree, tree, tree);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern int cp_complete_array_type (tree *, tree, bool);
extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t);
extern tree build_ptrmemfunc_type (tree);
extern tree build_ptrmem_type (tree, tree);
/* the grokdeclarator prototype is in decl.h */
extern tree build_this_parm (tree, cp_cv_quals);
extern int copy_fn_p (const_tree);
extern bool move_fn_p (const_tree);
extern bool move_signature_fn_p (const_tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern int grok_ctor_properties (const_tree, const_tree);
extern bool grok_op_properties (tree, bool);
extern tree xref_tag (enum tag_types, tree, tag_scope, bool);
extern tree xref_tag_from_type (tree, tree, tag_scope);
extern bool xref_basetypes (tree, tree);
extern tree start_enum (tree, tree, tree, bool, bool *);
extern void finish_enum_value_list (tree);
extern void finish_enum (tree);
extern void build_enumerator (tree, tree, tree, location_t);
extern tree lookup_enumerator (tree, tree);
extern bool start_preparsed_function (tree, tree, int);
extern bool start_function (cp_decl_specifier_seq *,
const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
extern tree outer_curly_brace_block (tree);
extern tree finish_function (int);
extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern void maybe_register_incomplete_var (tree);
extern void maybe_commonize_var (tree);
extern void complete_vars (tree);
extern tree static_fn_type (tree);
extern void revert_static_member_fn (tree);
extern void fixup_anonymous_aggr (tree);
extern tree compute_array_index_type (tree, tree, tsubst_flags_t);
extern tree check_default_argument (tree, tree, tsubst_flags_t);
typedef int (*walk_namespaces_fn) (tree, void *);
extern int walk_namespaces (walk_namespaces_fn,
void *);
extern int wrapup_globals_for_namespace (tree, void *);
extern tree create_implicit_typedef (tree, tree);
extern int local_variable_p (const_tree);
extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *);
extern tree cxx_builtin_function (tree decl);
extern tree cxx_builtin_function_ext_scope (tree decl);
extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
extern void warn_extern_redeclared_static (tree, tree);
extern tree cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern tree fndecl_declared_return_type (tree);
extern bool undeduced_auto_decl (tree);
extern void require_deduced_type (tree);
extern bool defer_mark_used_calls;
extern GTY(()) vec<tree, va_gc> *deferred_mark_used_calls;
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
/* in decl2.c */
extern void note_mangling_alias (tree, tree);
extern void generate_mangling_aliases (void);
extern bool check_java_method (tree);
extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
extern tree build_pointer_ptrmemfn_type (tree);
extern tree change_return_type (tree, tree);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree, bool);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, tree);
extern tree cp_reconstruct_complex_type (tree, tree);
extern bool attributes_naming_typedef_ok (tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cp_write_global_declarations (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree);
extern void determine_visibility (tree);
extern void constrain_class_visibility (tree);
extern void reset_type_linkage (tree);
extern void tentative_decl_linkage (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void no_linkage_error (tree);
extern void check_default_args (tree);
extern bool mark_used (tree);
extern bool mark_used (tree, tsubst_flags_t);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree);
extern tree get_guard (tree);
extern tree get_guard_cond (tree);
extern tree set_guard (tree);
extern tree get_tls_wrapper_fn (tree);
extern void mark_needed (tree);
extern bool decl_needed_p (tree);
extern void note_vague_linkage_fn (tree);
extern void note_variable_template_instantiation (tree);
extern tree build_artificial_parm (tree, tree);
extern bool possibly_inlined_p (tree);
extern int parm_index (tree);
extern tree vtv_start_verification_constructor_init_function (void);
extern tree vtv_finish_verification_constructor_init_function (tree);
extern bool cp_omp_mappable_type (tree);
/* in error.c */
extern void init_error (void);
extern const char *type_as_string (tree, int);
extern const char *type_as_string_translate (tree, int);
extern const char *decl_as_string (tree, int);
extern const char *decl_as_string_translate (tree, int);
extern const char *decl_as_dwarf_string (tree, int);
extern const char *expr_as_string (tree, int);
extern const char *lang_decl_name (tree, int, bool);
extern const char *lang_decl_dwarf_name (tree, int, bool);
extern const char *language_to_string (enum languages);
extern const char *class_key_or_enum_as_string (tree);
extern void maybe_warn_variadic_templates (void);
extern void maybe_warn_cpp0x (cpp0x_warn_str str);
extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern location_t location_of (tree);
extern void qualified_name_lookup_error (tree, tree, tree,
location_t);
/* in except.c */
extern void init_exception_processing (void);
extern tree expand_start_catch_block (tree);
extern void expand_end_catch_block (void);
extern tree build_exc_ptr (void);
extern tree build_throw (tree);
extern int nothrow_libfn_p (const_tree);
extern void check_handlers (tree);
extern tree finish_noexcept_expr (tree, tsubst_flags_t);
extern bool expr_noexcept_p (tree, tsubst_flags_t);
extern void perform_deferred_noexcept_checks (void);
extern bool nothrow_spec_p (const_tree);
extern bool type_noexcept_p (const_tree);
extern bool type_throw_all_p (const_tree);
extern tree build_noexcept_spec (tree, int);
extern void choose_personality_routine (enum languages);
extern tree build_must_not_throw_expr (tree,tree);
extern tree eh_type_info (tree);
extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern tree build_eh_type_type (tree);
extern tree cp_protect_cleanup_actions (void);
extern tree create_try_catch_expr (tree, tree);
/* in expr.c */
extern tree cplus_expand_constant (tree);
extern tree mark_rvalue_use (tree);
extern tree mark_lvalue_use (tree);
extern tree mark_type_use (tree);
extern void mark_exp_read (tree);
/* friend.c */
extern int is_friend (tree, tree);
extern void make_friend_class (tree, tree, bool);
extern void add_friend (tree, tree, bool);
extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool);
/* in init.c */
extern tree expand_member_init (tree);
extern void emit_mem_initializers (tree);
extern tree build_aggr_init (tree, tree, int,
tsubst_flags_t);
extern int is_class_type (tree, int);
extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
extern tree get_nsdmi (tree, bool);
extern tree build_offset_ref (tree, tree, bool,
tsubst_flags_t);
extern tree throw_bad_array_new_length (void);
extern tree build_new (vec<tree, va_gc> **, tree, tree,
vec<tree, va_gc> **, int,
tsubst_flags_t);
extern tree get_temp_regvar (tree, tree);
extern tree build_vec_init (tree, tree, tree, bool, int,
tsubst_flags_t);
extern tree build_delete (tree, tree,
special_function_kind,
int, int, tsubst_flags_t);
extern void push_base_cleanups (void);
extern tree build_vec_delete (tree, tree,
special_function_kind, int,
tsubst_flags_t);
extern tree create_temporary_var (tree);
extern void initialize_vtbl_ptrs (tree);
extern tree build_java_class_ref (tree);
extern tree scalar_constant_value (tree);
extern tree decl_really_constant_value (tree);
extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool);
extern tree build_vtbl_address (tree);
/* in lex.c */
extern void cxx_dup_lang_specific_decl (tree);
extern void yyungetc (int, int);
extern tree unqualified_name_lookup_error (tree);
extern tree unqualified_fn_lookup_error (tree);
extern tree build_lang_decl (enum tree_code, tree, tree);
extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree);
extern void retrofit_lang_decl (tree);
extern tree copy_decl (tree);
extern tree copy_type (tree);
extern tree cxx_make_type (enum tree_code);
extern tree make_class_type (enum tree_code);
extern bool cxx_init (void);
extern void cxx_finish (void);
extern bool in_main_input_context (void);
/* in method.c */
extern void init_method (void);
extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern bool trivial_fn_p (tree);
extern bool is_trivially_xible (enum tree_code, tree, tree);
extern tree get_defaulted_eh_spec (tree);
extern tree unevaluated_noexcept_spec (void);
extern void after_nsdmi_defaulted_late_checks (tree);
extern bool maybe_explain_implicit_delete (tree);
extern void explain_implicit_non_constexpr (tree);
extern void deduce_inheriting_ctor (tree);
extern void synthesize_method (tree);
extern tree lazily_declare_fn (special_function_kind,
tree);
extern tree skip_artificial_parms_for (const_tree, tree);
extern int num_artificial_parms_for (const_tree);
extern tree make_alias_for (tree, tree);
extern tree get_copy_ctor (tree, tsubst_flags_t);
extern tree get_copy_assign (tree);
extern tree get_default_ctor (tree);
extern tree get_dtor (tree, tsubst_flags_t);
extern tree get_inherited_ctor (tree);
extern tree locate_ctor (tree);
extern tree implicitly_declare_fn (special_function_kind, tree,
bool, tree, tree);
/* In optimize.c */
extern bool maybe_clone_body (tree);
/* In parser.c */
extern tree cp_convert_range_for (tree, tree, tree, bool);
extern bool parsing_nsdmi (void);
extern void inject_this_parameter (tree, cp_cv_quals);
/* in pt.c */
extern bool check_template_shadow (tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern tree check_explicit_specialization (tree, tree, int, int);
extern int num_template_headers_for_class (tree);
extern void check_template_variable (tree);
extern tree make_auto (void);
extern tree make_decltype_auto (void);
extern tree do_auto_deduction (tree, tree, tree);
extern tree type_uses_auto (tree);
extern tree type_uses_auto_or_concept (tree);
extern void append_type_to_template_for_access_check (tree, tree, tree,
location_t);
extern tree convert_generic_types_to_packs (tree, int, int);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);
extern bool is_auto_or_concept (const_tree);
extern tree process_template_parm (tree, location_t, tree,
bool, bool);
extern tree end_template_parm_list (tree);
extern void end_template_decl (void);
extern tree maybe_update_decl_type (tree, tree);
extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern tree add_inherited_template_parms (tree, tree);
extern bool redeclare_class_template (tree, tree);
extern tree lookup_template_class (tree, tree, tree, tree,
int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern tree lookup_template_variable (tree, tree);
extern int uses_template_parms (tree);
extern int uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree,
const tree *, unsigned int,
tree, unification_kind_t, int,
bool, bool);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern bool always_instantiate_p (tree);
extern void maybe_instantiate_noexcept (tree);
extern tree instantiate_decl (tree, int, bool);
extern int comp_template_parms (const_tree, const_tree);
extern bool uses_parameter_packs (tree);
extern bool template_parameter_pack_p (const_tree);
extern bool function_parameter_pack_p (const_tree);
extern bool function_parameter_expanded_from_pack_p (tree, tree);
extern tree make_pack_expansion (tree);
extern bool check_for_bare_parameter_packs (tree);
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern tree get_pattern_parm (tree, tree);
extern int comp_template_args (tree, tree);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);
extern tree tsubst_default_argument (tree, tree, tree,
tsubst_flags_t);
extern tree tsubst (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t,
tree, bool, bool);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern bool problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern struct tinst_level *current_instantiation(void);
extern bool instantiating_current_function_p (void);
extern tree maybe_get_template_decl_from_type_decl (tree);
extern int processing_template_parmlist;
extern bool dependent_type_p (tree);
extern bool dependent_scope_p (tree);
extern bool any_dependent_template_arguments_p (const_tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern bool instantiation_dependent_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree instantiate_non_dependent_expr (tree);
extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t);
extern bool variable_template_specialization_p (tree);
extern bool alias_type_or_template_p (tree);
extern bool alias_template_specialization_p (const_tree);
extern bool dependent_alias_template_spec_p (const_tree);
extern bool explicit_class_specialization_p (tree);
extern bool push_tinst_level (tree);
extern bool push_tinst_level_loc (tree, location_t);
extern void pop_tinst_level (void);
extern struct tinst_level *outermost_tinst_level(void);
extern void init_template_processing (void);
extern void print_template_statistics (void);
bool template_template_parameter_p (const_tree);
bool template_type_parameter_p (const_tree);
extern bool primary_template_instantiation_p (const_tree);
extern tree get_primary_template_innermost_parameters (const_tree);
extern tree get_template_parms_at_level (tree, int);
extern tree get_template_innermost_arguments (const_tree);
extern tree get_template_argument_pack_elems (const_tree);
extern tree get_function_template_decl (const_tree);
extern tree resolve_nondeduced_context (tree);
extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (const_tree);
extern void finish_repo (void);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree, tsubst_flags_t);
extern tree get_tinfo_decl (tree);
extern tree get_typeid (tree, tsubst_flags_t);
extern tree build_headof (tree);
extern tree build_dynamic_cast (tree, tree, tsubst_flags_t);
extern void emit_support_tinfos (void);
extern bool emit_tinfo_decl (tree);
/* in search.c */
extern bool accessible_base_p (tree, tree, bool);
extern tree lookup_base (tree, tree, base_access,
base_kind *, tsubst_flags_t);
extern tree dcast_base_hint (tree, tree);
extern int accessible_p (tree, tree, bool);
extern int accessible_in_template_p (tree, tree);
extern tree lookup_field_1 (tree, tree, bool);
extern tree lookup_field (tree, tree, int, bool);
extern int lookup_fnfields_1 (tree, tree);
extern tree lookup_fnfields_slot (tree, tree);
extern tree lookup_fnfields_slot_nolazy (tree, tree);
extern int class_method_index_for_fn (tree, tree);
extern tree lookup_fnfields (tree, tree, int);
extern tree lookup_member (tree, tree, int, bool,
tsubst_flags_t);
extern int look_for_overrides (tree, tree);
extern void get_pure_virtuals (tree);
extern void maybe_suppress_debug_info (tree);
extern void note_debug_info_needed (tree);
extern void print_search_statistics (void);
extern void reinit_search_statistics (void);
extern tree current_scope (void);
extern int at_function_scope_p (void);
extern bool at_class_scope_p (void);
extern bool at_namespace_scope_p (void);
extern tree context_for_name_lookup (tree);
extern tree lookup_conversions (tree);
extern tree binfo_from_vbase (tree);
extern tree binfo_for_vbase (tree, tree);
extern tree look_for_overrides_here (tree, tree);
#define dfs_skip_bases ((tree)1)
extern tree dfs_walk_all (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree dfs_walk_once (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree binfo_via_virtual (tree, tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
/* The representation of a deferred access check.  Checks are queued
   (see push_deferring_access_checks below) and later performed or
   discarded by perform_deferred_access_checks and friends.  */
typedef struct GTY(()) deferred_access_check {
  /* The base class in which the declaration is referenced.  */
  tree binfo;
  /* The declaration whose access must be checked.  */
  tree decl;
  /* The declaration that should be used in the error message.  */
  tree diag_decl;
  /* The location of this access.  */
  location_t loc;
} deferred_access_check;
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree,
tsubst_flags_t);
extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern void finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_for_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
extern bool is_this_parameter (tree);
enum {
BCS_NO_SCOPE = 1,
BCS_TRY_BLOCK = 2,
BCS_FN_BODY = 4
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
extern tree finish_asm_stmt (int, tree, tree, tree, tree,
tree);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern tree finish_parenthesized_expr (tree);
extern tree force_paren_expr (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
bool empty_expr_stmt_p (tree);
extern tree perform_koenig_lookup (tree, vec<tree, va_gc> *,
tsubst_flags_t);
extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
bool, tsubst_flags_t);
extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_increment_expr (tree, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t);
extern tree finish_unary_op_expr (location_t, enum tree_code, tree,
tsubst_flags_t);
extern tree finish_compound_literal (tree, tree, tsubst_flags_t);
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
extern tree finish_template_type_parm (tree, tree);
extern tree finish_template_template_parm (tree, tree);
extern tree begin_class_definition (tree);
extern void finish_template_decl (tree);
extern tree finish_template_type (tree, tree, int);
extern tree finish_base_specifier (tree, tree, bool);
extern void finish_member_declaration (tree);
extern bool outer_automatic_var_p (tree);
extern tree process_outer_var_ref (tree, tsubst_flags_t);
extern tree finish_id_expression (tree, tree, tree,
cp_id_kind *,
bool, bool, bool *,
bool, bool, bool, bool,
const char **,
location_t);
extern tree finish_typeof (tree);
extern tree finish_underlying_type (tree);
extern tree calculate_bases (tree);
extern tree finish_bases (tree, bool);
extern tree calculate_direct_bases (tree);
extern tree finish_offsetof (tree, location_t);
extern void finish_decl_cleanup (tree, tree);
extern void finish_eh_cleanup (tree);
extern void emit_associated_thunks (tree);
extern void finish_mem_initializers (tree);
extern tree check_template_template_default_arg (tree);
extern bool expand_or_defer_fn_1 (tree);
extern void expand_or_defer_fn (tree);
extern void add_typedef_to_current_template_for_access_check (tree, tree,
location_t);
extern void check_accessibility_of_qualified_id (tree, tree, tree);
extern tree finish_qualified_id_expr (tree, tree, bool, bool,
bool, bool, tsubst_flags_t);
extern void simplify_aggr_init_expr (tree *);
extern void finalize_nrv (tree *, tree, tree);
extern void note_decl_for_pch (tree);
extern tree omp_reduction_id (enum tree_code, tree, tree);
extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *);
extern void cp_check_omp_declare_reduction (tree);
extern tree finish_omp_clauses (tree);
extern void finish_omp_threadprivate (tree);
extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree finish_oacc_data (tree, tree);
extern tree finish_oacc_kernels (tree, tree);
extern tree finish_oacc_parallel (tree, tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
extern tree begin_omp_task (void);
extern tree finish_omp_task (tree, tree);
extern tree finish_omp_for (location_t, enum tree_code,
tree, tree, tree, tree, tree,
tree, tree);
extern void finish_omp_atomic (enum tree_code, enum tree_code,
tree, tree, tree, tree, tree,
bool);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern void finish_omp_taskwait (void);
extern void finish_omp_taskyield (void);
extern void finish_omp_cancel (tree);
extern void finish_omp_cancellation_point (tree);
extern tree begin_transaction_stmt (location_t, tree *, int);
extern void finish_transaction_stmt (tree, tree, int, tree);
extern tree build_transaction_expr (location_t, tree, int, tree);
extern bool cxx_omp_create_clause_info (tree, tree, bool, bool,
bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
bool);
extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
extern tree finish_trait_expr (enum cp_trait_kind, tree, tree);
extern tree build_lambda_expr (void);
extern tree build_lambda_object (tree);
extern tree begin_lambda_type (tree);
extern tree lambda_capture_field_type (tree, bool);
extern tree lambda_return_type (tree);
extern tree lambda_proxy_type (tree);
extern tree lambda_function (tree);
extern void apply_deduced_return_type (tree, tree);
extern tree add_capture (tree, tree, tree, bool, bool);
extern tree add_default_capture (tree, tree, tree);
extern tree build_capture_proxy (tree);
extern void insert_capture_proxy (tree);
extern void insert_pending_capture_proxies (void);
extern bool is_capture_proxy (tree);
extern bool is_normal_capture_proxy (tree);
extern void register_capture_members (tree);
extern tree lambda_expr_this_capture (tree, bool);
extern tree maybe_resolve_dummy (tree, bool);
extern tree current_nonlambda_function (void);
extern tree nonlambda_method_basetype (void);
extern void maybe_add_lambda_conv_op (tree);
extern bool is_lambda_ignored_entity (tree);
/* in tree.c */
extern int cp_tree_operand_length (const_tree);
void cp_free_lang_data (tree t);
extern tree force_target_expr (tree, tree, tsubst_flags_t);
extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t);
extern void lang_check_failed (const char *, int,
const char *) ATTRIBUTE_NORETURN;
extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern void init_tree (void);
extern bool pod_type_p (const_tree);
extern bool layout_pod_type_p (const_tree);
extern bool std_layout_type_p (const_tree);
extern bool trivial_type_p (const_tree);
extern bool trivially_copyable_p (const_tree);
extern bool scalarish_type_p (const_tree);
extern bool type_has_nontrivial_default_init (const_tree);
extern bool type_has_nontrivial_copy_init (const_tree);
extern bool class_tmpl_impl_spec_p (const_tree);
extern int zero_init_p (const_tree);
extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree);
extern bool check_abi_tag_args (tree, tree);
extern tree strip_typedefs (tree);
extern tree strip_typedefs_expr (tree);
extern tree copy_binfo (tree, tree, tree,
tree *, int);
extern int member_p (const_tree);
extern cp_lvalue_kind real_lvalue_p (const_tree);
extern cp_lvalue_kind lvalue_kind (const_tree);
extern bool lvalue_or_rvalue_with_address_p (const_tree);
extern bool xvalue_p (const_tree);
extern bool builtin_valid_in_constant_expr_p (const_tree);
extern tree build_min (enum tree_code, tree, ...);
extern tree build_min_nt_loc (location_t, enum tree_code,
...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_aggr_init_expr (tree, tree);
extern tree get_target_expr (tree);
extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
extern tree build_cplus_array_type (tree, tree);
extern tree build_array_of_n_type (tree, int);
extern bool array_of_runtime_bound_p (tree);
extern tree build_array_copy (tree);
extern tree build_vec_init_expr (tree, tree, tsubst_flags_t);
extern void diagnose_non_constexpr_vec_init (tree);
extern tree hash_tree_cons (tree, tree, tree);
extern tree hash_tree_chain (tree, tree);
extern tree build_qualified_name (tree, tree, tree, bool);
extern tree build_ref_qualified_type (tree, cp_ref_qualifier);
extern int is_overloaded_fn (tree);
extern tree dependent_name (tree);
extern tree get_fns (tree);
extern tree get_first_fn (tree);
extern tree ovl_cons (tree, tree);
extern tree build_overload (tree, tree);
extern tree ovl_scope (tree);
extern bool non_static_member_function_p (tree);
extern const char *cxx_printable_name (tree, int);
extern const char *cxx_printable_name_translate (tree, int);
extern tree build_exception_variant (tree, tree);
extern tree bind_template_template_parm (tree, tree);
extern tree array_type_nelts_total (tree);
extern tree array_type_nelts_top (tree);
extern tree break_out_target_exprs (tree);
extern tree build_ctor_subob_ref (tree, tree, tree);
extern tree replace_placeholders (tree, tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
extern bool decl_anon_ns_mem_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
extern bool really_overloaded_fn (tree);
extern bool cp_tree_equal (tree, tree);
extern tree no_linkage_check (tree, bool);
extern void debug_binfo (tree);
extern tree build_dummy_object (tree);
extern tree maybe_dummy_object (tree, tree *);
extern int is_dummy_object (const_tree);
extern const struct attribute_spec cxx_attribute_table[];
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_reference_type (tree, bool);
extern tree move (tree);
extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t);
#define cp_build_qualified_type(TYPE, QUALS) \
cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
extern bool cv_qualified_p (const_tree);
extern tree cv_unqualified (tree);
extern special_function_kind special_function_p (const_tree);
extern int count_trees (tree);
extern int char_type_p (tree);
extern void verify_stmt_tree (tree);
extern linkage_kind decl_linkage (tree);
extern duration_kind decl_storage_duration (tree);
extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
void*, hash_set<tree> *);
#define cp_walk_tree(tp,func,data,pset) \
walk_tree_1 (tp, func, data, pset, cp_walk_subtrees)
#define cp_walk_tree_without_duplicates(tp,func,data) \
walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees)
extern tree fold_if_not_in_template (tree);
extern tree rvalue (tree);
extern tree convert_bitfield_to_declared_type (tree);
extern tree cp_save_expr (tree);
extern bool cast_valid_in_integral_constant_expression_p (tree);
extern bool cxx_type_hash_eq (const_tree, const_tree);
extern void cxx_print_statistics (void);
extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t);
/* in ptree.c */
extern void cxx_print_xnode (FILE *, tree, int);
extern void cxx_print_decl (FILE *, tree, int);
extern void cxx_print_type (FILE *, tree, int);
extern void cxx_print_identifier (FILE *, tree, int);
extern void cxx_print_error_function (diagnostic_context *,
const char *,
struct diagnostic_info *);
/* in typeck.c */
extern bool cxx_mark_addressable (tree);
extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree);
extern tree condition_conversion (tree);
extern tree require_complete_type (tree);
extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
extern int type_unknown_p (const_tree);
enum { ce_derived, ce_normal, ce_exact };
extern bool comp_except_specs (const_tree, const_tree, int);
extern bool comptypes (tree, tree, int);
extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
extern bool compparms (const_tree, const_tree);
extern int comp_cv_qualification (const_tree, const_tree);
extern int comp_cv_qualification (int, int);
extern int comp_cv_qual_signature (tree, tree);
extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool);
extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool);
extern tree cxx_alignas_expr (tree);
extern tree cxx_sizeof_nowarn (tree);
extern tree is_bitfield_expr_with_lowered_type (const_tree);
extern tree unlowered_expr_type (const_tree);
extern tree decay_conversion (tree, tsubst_flags_t);
extern tree build_class_member_access_expr (tree, tree, tree, bool,
tsubst_flags_t);
extern tree finish_class_member_access_expr (tree, tree, bool,
tsubst_flags_t);
extern tree build_x_indirect_ref (location_t, tree,
ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree cp_build_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t);
extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_x_binary_op (location_t,
enum tree_code, tree,
enum tree_code, tree,
enum tree_code, tree *,
tsubst_flags_t);
extern tree build_x_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree build_x_unary_op (location_t,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_addr_expr (tree, tsubst_flags_t);
extern tree cp_build_unary_op (enum tree_code, tree, int,
tsubst_flags_t);
extern tree unary_complex_lvalue (enum tree_code, tree);
extern tree build_x_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
const char *, tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t);
extern tree build_static_cast (tree, tree, tsubst_flags_t);
extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t);
extern tree build_const_cast (tree, tree, tsubst_flags_t);
extern tree build_c_cast (location_t, tree, tree);
extern tree cp_build_c_cast (tree, tree, tsubst_flags_t);
extern tree build_x_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_modify_expr (tree, enum tree_code, tree,
tsubst_flags_t);
extern tree convert_for_initialization (tree, tree, tree, int,
impl_conv_rhs, tree, int,
tsubst_flags_t);
extern int comp_ptr_ttypes (tree, tree);
extern bool comp_ptr_ttypes_const (tree, tree);
extern bool error_type_p (const_tree);
extern bool ptr_reasonably_similar (const_tree, const_tree);
extern tree build_ptrmemfunc (tree, tree, int, bool,
tsubst_flags_t);
extern int cp_type_quals (const_tree);
extern int type_memfn_quals (const_tree);
extern cp_ref_qualifier type_memfn_rqual (const_tree);
extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier);
extern bool cp_has_mutable_p (const_tree);
extern bool at_least_as_qualified_p (const_tree, const_tree);
extern void cp_apply_type_quals_to_decl (int, tree);
extern tree build_ptrmemfunc1 (tree, tree, tree);
extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
extern tree type_after_usual_arithmetic_conversions (tree, tree);
extern tree common_pointer_type (tree, tree);
extern tree composite_pointer_type (tree, tree, tree, tree,
composite_pointer_operation,
tsubst_flags_t);
extern tree merge_types (tree, tree);
extern tree strip_array_domain (tree);
extern tree check_return_expr (tree, bool *);
extern tree cp_build_binary_op (location_t,
enum tree_code, tree, tree,
tsubst_flags_t);
extern tree build_x_vec_perm_expr (location_t,
tree, tree, tree,
tsubst_flags_t);
#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true)
extern tree build_simple_component_ref (tree, tree);
extern tree build_ptrmemfunc_access_expr (tree, tree);
extern tree build_address (tree);
extern tree build_nop (tree, tree);
extern tree non_reference (tree);
extern tree lookup_anon_field (tree, tree);
extern bool invalid_nonstatic_memfn_p (tree, tsubst_flags_t);
extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t);
extern tree convert_ptrmem (tree, tree, bool, bool,
tsubst_flags_t);
extern int lvalue_or_else (tree, enum lvalue_use,
tsubst_flags_t);
extern void check_template_keyword (tree);
extern bool check_raw_literal_operator (const_tree decl);
extern bool check_literal_operator_args (const_tree, bool *, bool *);
extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t);
extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
/* in typeck2.c */
extern void require_complete_eh_spec_types (tree, tree);
extern void cxx_incomplete_type_diagnostic (const_tree, const_tree, diagnostic_t);
#undef cxx_incomplete_type_error
extern void cxx_incomplete_type_error (const_tree, const_tree);
#define cxx_incomplete_type_error(V,T) \
(cxx_incomplete_type_diagnostic ((V), (T), DK_ERROR))
extern void cxx_incomplete_type_inform (const_tree);
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (tree, enum lvalue_use);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error (abstract_class_use, tree);
extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern tree split_nonconstant_init (tree, tree);
extern bool check_narrowing (tree, tree, tsubst_flags_t);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int);
extern tree digest_nsdmi_init (tree, tree);
extern tree build_scoped_ref (tree, tree, tree *);
extern tree build_x_arrow (location_t, tree,
tsubst_flags_t);
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
extern tree merge_exception_specifiers (tree, tree);
/* in mangle.c */
extern bool maybe_remove_implicit_alias (tree);
extern void init_mangle (void);
extern void mangle_decl (tree);
extern const char *mangle_type_string (tree);
extern tree mangle_typeinfo_for_type (tree);
extern tree mangle_typeinfo_string_for_type (tree);
extern tree mangle_vtbl_for_type (tree);
extern tree mangle_vtt_for_type (tree);
extern tree mangle_ctor_vtbl_for_type (tree, tree);
extern tree mangle_thunk (tree, int, tree, tree);
extern tree mangle_conv_op_name_for_type (tree);
extern tree mangle_guard_variable (tree);
extern tree mangle_tls_init_fn (tree);
extern tree mangle_tls_wrapper_fn (tree);
extern bool decl_tls_wrapper_p (tree);
extern tree mangle_ref_init_variable (tree);
extern char * get_mangled_vtable_map_var_name (tree);
extern bool mangle_return_type_p (tree);
/* in dump.c */
extern bool cp_dump_tree (void *, tree);
/* In cp/cp-objcp-common.c. */
extern alias_set_type cxx_get_alias_set (tree);
extern bool cxx_warn_unused_global_decl (const_tree);
extern size_t cp_tree_size (enum tree_code);
extern bool cp_var_mod_type_p (tree, tree);
extern void cxx_initialize_diagnostics (diagnostic_context *);
extern int cxx_types_compatible_p (tree, tree);
extern void init_shadowed_var_for_decl (void);
extern bool cxx_block_may_fallthru (const_tree);
/* in cp-gimplify.c */
extern int cp_gimplify_expr (tree *, gimple_seq *,
gimple_seq *);
extern void cp_genericize (tree);
extern bool cxx_omp_const_qual_no_mutable (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
extern void cxx_omp_finish_clause (tree, gimple_seq *);
extern bool cxx_omp_privatize_by_reference (const_tree);
/* in name-lookup.c */
extern void suggest_alternatives_for (location_t, tree);
extern tree strip_using_decl (tree);
/* in vtable-class-hierarchy.c */
extern void vtv_compute_class_hierarchy_transitive_closure (void);
extern void vtv_generate_init_routine (void);
extern void vtv_save_class_info (tree);
extern void vtv_recover_class_info (void);
extern void vtv_build_vtable_verify_fndecl (void);
/* In cp-cilkplus.c. */
extern bool cpp_validate_cilk_plus_loop (tree);
/* In cp/cp-array-notations.c */
extern tree expand_array_notation_exprs (tree);
bool cilkplus_an_triplet_types_ok_p (location_t, tree, tree, tree,
tree);
/* In constexpr.c */
extern bool literal_type_p (tree);
extern tree register_constexpr_fundef (tree, tree);
extern bool check_constexpr_ctor_body (tree, tree, bool);
extern tree ensure_literal_type_for_constexpr_object (tree);
extern bool potential_constant_expression (tree);
extern bool potential_static_init_expression (tree);
extern bool potential_rvalue_constant_expression (tree);
extern bool require_potential_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
extern tree cxx_constant_value (tree, tree = NULL_TREE);
extern tree maybe_constant_value (tree, tree = NULL_TREE);
extern tree maybe_constant_init (tree, tree = NULL_TREE);
extern tree fold_non_dependent_expr (tree);
extern bool is_sub_constant_expr (tree);
extern bool reduced_constant_expression_p (tree);
extern bool is_instantiation_of_constexpr (tree);
extern bool var_in_constexpr_fn (tree);
extern void explain_invalid_constexpr_fn (tree);
extern vec<tree> cx_error_context (void);
/* In c-family/cilk.c */
extern bool cilk_valid_spawn (tree);
/* In cp-ubsan.c */
extern void cp_ubsan_maybe_instrument_member_call (tree);
extern void cp_ubsan_instrument_member_accesses (tree *);
extern tree cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree);
extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree);
/* -- end of C++ */
#endif /* ! GCC_CP_TREE_H */
|
GB_binop__div_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__div_uint64
// A.*B function (eWiseMult): GB_AemultB__div_uint64
// A*D function (colscale): GB_AxD__div_uint64
// D*A function (rowscale): GB_DxB__div_uint64
// C+=B function (dense accum): GB_Cdense_accumB__div_uint64
// C+=b function (dense accum): GB_Cdense_accumb__div_uint64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_uint64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_uint64
// C=scalar+B GB_bind1st__div_uint64
// C=scalar+B' GB_bind1st_tran__div_uint64
// C=A+scalar GB_bind2nd__div_uint64
// C=A'+scalar GB_bind2nd_tran__div_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_UNSIGNED (x, y, 64) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT64 || GxB_NO_DIV_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B, where C, A, and B are all dense matrices.  The actual loop lives
// in GB_dense_ewise3_accum_template.c (not visible here); it presumably
// applies cij += GB_IDIV_UNSIGNED (aij, bij, 64) elementwise -- confirm
// against the template.  Note: unlike its siblings, this kernel is void and
// has no GB_DISABLE guard (by design in the generated code).
void GB_Cdense_ewise3_accum__div_uint64
(
GrB_Matrix C,          // input/output: dense accumulator matrix
const GrB_Matrix A,    // first dense input
const GrB_Matrix B,    // second dense input
const int nthreads     // number of OpenMP threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, where C, A, and B are all dense matrices (no accumulation).
// The numeric loop is supplied by GB_dense_ewise3_noaccum_template.c (not
// visible here).  Returns GrB_NO_VALUE when this operator/type combination
// is compiled out via GB_DISABLE, so the caller falls back to generic code.
GrB_Info GB_Cdense_ewise3_noaccum__div_uint64
(
GrB_Matrix C,          // output: dense result matrix
const GrB_Matrix A,    // first dense input
const GrB_Matrix B,    // second dense input
const int nthreads     // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.  The slice
// arrays (kfirst/klast/pstart) partition B's entries across ntasks tasks;
// the work itself is in GB_dense_subassign_23_template.c (not visible here).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_Cdense_accumB__div_uint64
(
GrB_Matrix C,                              // input/output: dense accumulator
const GrB_Matrix B,                        // sparse matrix to fold into C
const int64_t *GB_RESTRICT kfirst_slice,   // first vector of each task's slice
const int64_t *GB_RESTRICT klast_slice,    // last vector of each task's slice
const int64_t *GB_RESTRICT pstart_slice,   // start offset of each task's slice
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.  The scalar arrives
// untyped (GB_void *) and is unpacked here as uint64_t; the actual loop is
// in GB_dense_subassign_22_template.c (not visible here).  Returns
// GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
//
// Fix: the generated code had a second, unreachable `return (GrB_SUCCESS)`
// after the inner block, which already returns on all paths; the dead
// statement is removed.
GrB_Info GB_Cdense_accumb__div_uint64
(
GrB_Matrix C,              // input/output: dense accumulator matrix
const GB_void *p_bwork,    // pointer to the scalar b, of type uint64_t
const int nthreads         // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.  Only C->x is
// typed here; the traversal is in GB_AxB_colscale_meta.c (not visible here).
// The *_is_pattern flags indicate the values of that matrix are ignored
// (pattern-only) -- confirm against the meta template.  Returns GrB_NO_VALUE
// when compiled out via GB_DISABLE.
GrB_Info GB_AxD__div_uint64
(
GrB_Matrix C,                              // output matrix
const GrB_Matrix A, bool A_is_pattern,     // input matrix (values unused if pattern)
const GrB_Matrix D, bool D_is_pattern,     // diagonal scaling matrix
const int64_t *GB_RESTRICT kfirst_slice,   // task partition of A's entries
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.  The traversal is
// in GB_AxB_rowscale_meta.c (not visible here).  Returns GrB_NO_VALUE when
// compiled out via GB_DISABLE.
GrB_Info GB_DxB__div_uint64
(
GrB_Matrix C,                              // output matrix
const GrB_Matrix D, bool D_is_pattern,     // diagonal scaling matrix
const GrB_Matrix B, bool B_is_pattern,     // input matrix
int nthreads                               // number of OpenMP threads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, with the DIV_UINT64 operator applied to
// entries present in both A and B (entries in only one input are copied --
// standard eWiseAdd semantics; confirm against GB_add_template.c, which is
// not visible here).  The slice pointers start NULL and are allocated inside
// the template; GB_FREE_ALL (defined just above this function) releases all
// three slicings on exit.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB_AaddB__div_uint64
(
GrB_Matrix C,                          // output matrix
const int C_sparsity,                  // requested sparsity format of C
const GrB_Matrix M,                    // optional mask (may be NULL)
const bool Mask_struct,                // use only the structure of M
const bool Mask_comp,                  // complement the mask
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,                   // C's hyperlist equals M's hyperlist
const int64_t *GB_RESTRICT C_to_M,     // mapping of C's vectors to M's
const int64_t *GB_RESTRICT C_to_A,     // mapping of C's vectors to A's
const int64_t *GB_RESTRICT C_to_B,     // mapping of C's vectors to B's
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, applying DIV_UINT64 only to entries in
// the intersection of A's and B's patterns (standard eWiseMult semantics;
// confirm against GB_emult_template.c, not visible here).  Slice workspace
// starts NULL, is allocated inside the template, and is freed via the
// GB_FREE_ALL macro defined earlier in this file.  Returns GrB_NO_VALUE
// when compiled out via GB_DISABLE.
GrB_Info GB_AemultB__div_uint64
(
GrB_Matrix C,                          // output matrix
const int C_sparsity,                  // requested sparsity format of C
const GrB_Matrix M,                    // optional mask (may be NULL)
const bool Mask_struct,                // use only the structure of M
const bool Mask_comp,                  // complement the mask
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,     // mapping of C's vectors to M's
const int64_t *GB_RESTRICT C_to_A,     // mapping of C's vectors to A's
const int64_t *GB_RESTRICT C_to_B,     // mapping of C's vectors to B's
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand and apply the
// binary operator to every entry of B.  Bb is B's bitmap (NULL unless B is
// in bitmap format); entries whose bitmap bit is clear are skipped.  The
// loop parallelizes over the anz entries with a static OpenMP schedule.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_bind1st__div_uint64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Bx = (uint64_t *) Bx_input ;
uint64_t x = (*((uint64_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// apply the operator only where an entry is present
if (GBB (Bb, p))
{
Cx [p] = GB_IDIV_UNSIGNED (x, Bx [p], 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand and apply the
// binary operator to every entry of A.  Ab is A's bitmap (NULL unless A is
// in bitmap format); entries whose bitmap bit is clear are skipped.  The
// loop parallelizes over the anz entries with a static OpenMP schedule.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_bind2nd__div_uint64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// apply the operator only where an entry is present
if (GBB (Ab, p))
{
Cx [p] = GB_IDIV_UNSIGNED (Ax [p], y, 64) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template below;
// it writes op (x, Ax [pA]) into Cx [pC].
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 64) ; \
}
// Transpose A and apply z = div (x, aij) to each entry, writing into C.
// NOTE(review): the exact use of Workspaces / A_slice / nworkspaces is
// defined inside GB_unop_transpose.c, which is not visible here — confirm
// against that template before changing this signature.
GrB_Info GB_bind1st_tran__div_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bound first argument x, typecast from the generic pointer
uint64_t x = (*((const uint64_t *) x_input)) ;
// the included template performs the entire transpose-and-apply
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE after the template include
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template below;
// it writes op (Ax [pA], y) into Cx [pC].
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 64) ; \
}
// Transpose A and apply z = div (aij, y) to each entry, writing into C.
// NOTE(review): Workspaces / A_slice / nworkspaces semantics are defined
// inside GB_unop_transpose.c, not visible here.
GrB_Info GB_bind2nd_tran__div_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bound second argument y, typecast from the generic pointer
uint64_t y = (*((const uint64_t *) y_input)) ;
// the included template performs the entire transpose-and-apply
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
for-4.c | /* { dg-options "-std=gnu99 -fopenmp" } */
extern void abort (void);
#define M(x, y, z) O(x, y, z)
#define O(x, y, z) x ## _ ## y ## _ ## z
#define F taskloop
#define G taskloop
#define S
#define N(x) M(x, G, normal)
#include "for-2.h"
#undef S
#undef N
#undef F
#undef G
#define F taskloop simd
#define G taskloop_simd
#define S
#define N(x) M(x, G, normal)
#include "for-2.h"
#undef S
#undef N
#undef F
#undef G
/* Entry point: run the instantiated taskloop tests from one thread of a
   parallel region and OR-reduce the per-thread failure flags.  */
int
main ()
{
  int err = 0;
#pragma omp parallel reduction(|:err)
#pragma omp single
  {
    /* || short-circuits: the simd variant is skipped once the first test
       fails, matching the original control flow.  */
    err |= (test_taskloop_normal ()
	    || test_taskloop_simd_normal ());
  }
  if (err)
    abort ();
  return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  /// NOTE(review): the value encoding is defined by the code that populates
  /// this field; it is stored here only as an opaque tag — confirm against
  /// the writers before interpreting it.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// Per-file nullability records.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache keyed on the most recently requested file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Fast path: the requested file is the one we served last time.
    if (file == Cache.File)
      return Cache.Nullability;

    // Write the cached record back into the map before evicting it.
    if (!Cache.File.isInvalid())
      Map[Cache.File] = Cache.Nullability;

    // Load (or default-construct) this file's entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};
/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token, all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Handles e.g. BaseType{ .D = Tok...
  void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
                                  const Designation &D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  ///
  /// The callback should also emit signature help as a side-effect, but only
  /// if the completion point has been reached.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Get the expected type associated with this location, if any.
  ///
  /// If the location is a function argument, determining the expected type
  /// involves considering all function overloads and the arguments so far.
  /// In this case, signature help for these function overloads will be reported
  /// as a side-effect (only if the completion point has been reached).
  QualType get(SourceLocation Tok) const {
    // Only answer for the exact token the builder was last updated for.
    if (!Enabled || Tok != ExpectedLoc)
      return QualType();
    // A concrete recorded type wins over the lazy callback.
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  bool Enabled;
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible previous declaration is always linked against.
  if (isVisible(Old))
    return true;
  // See comment in below overload for why it's safe to compute the linkage
  // of the new declaration here.
  if (!New->isExternallyDeclarable())
    return false;
  assert(Old->isExternallyDeclarable() &&
         "should not have found a non-externally-declarable previous decl");
  return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
class AlignPackInfo {
public:
  // `Native` represents default align mode, which may vary based on the
  // platform.
  enum Mode : unsigned char { Native, Natural, Packed, Mac68k };

  // #pragma pack info constructor
  AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
      : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
    assert(Num == PackNumber && "The pack number has been truncated.");
  }

  // #pragma align info constructor
  AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
      : PackAttr(false), AlignMode(M),
        PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}

  explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}

  AlignPackInfo() : AlignPackInfo(Native, false) {}

  // When a AlignPackInfo itself cannot be used, this returns an 32-bit
  // integer encoding for it. This should only be passed to
  // AlignPackInfo::getFromRawEncoding, it should not be inspected directly.
  static uint32_t getRawEncoding(const AlignPackInfo &Info) {
    std::uint32_t Encoding{};
    if (Info.IsXLStack())
      Encoding |= IsXLMask;
    Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;
    if (Info.IsPackAttr())
      Encoding |= PackAttrMask;
    Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
    return Encoding;
  }

  static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
    bool IsXL = static_cast<bool>(Encoding & IsXLMask);
    AlignPackInfo::Mode M =
        static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
    int PackNumber = (Encoding & PackNumMask) >> 4;
    if (Encoding & PackAttrMask)
      return AlignPackInfo(M, PackNumber, IsXL);
    return AlignPackInfo(M, IsXL);
  }

  bool IsPackAttr() const { return PackAttr; }
  bool IsAlignAttr() const { return !PackAttr; }
  Mode getAlignMode() const { return AlignMode; }
  unsigned getPackNumber() const { return PackNumber; }
  bool IsPackSet() const {
    // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
    // attribute on a decl.
    return PackNumber != UninitPackVal && PackNumber != 0;
  }
  bool IsXLStack() const { return XLStack; }

  bool operator==(const AlignPackInfo &Info) const {
    return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
           std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
                    Info.XLStack);
  }
  bool operator!=(const AlignPackInfo &Info) const {
    return !(*this == Info);
  }

private:
  /// \brief True if this is a pragma pack attribute,
  /// not a pragma align attribute.
  bool PackAttr;
  /// \brief The alignment mode that is in effect.
  Mode AlignMode;
  /// \brief The pack number of the stack.
  unsigned char PackNumber;
  /// \brief True if it is a XL #pragma align/pack stack.
  bool XLStack;
  /// \brief Uninitialized pack value.
  static constexpr unsigned char UninitPackVal = -1;

  // Masks to encode and decode an AlignPackInfo.
  // Raw-encoding layout: bit 0 = XL-stack flag, bits 1-2 = align mode,
  // bit 3 = pack-attribute flag, bits 4-8 = pack number.
  // PackAttrMask was written 0x00000'0008 (an extra digit in the first
  // group); the value is unchanged, but it is normalized here to the
  // 4-digit grouping used by the sibling masks.
  static constexpr uint32_t IsXLMask{0x0000'0001};
  static constexpr uint32_t AlignModeMask{0x0000'0006};
  static constexpr uint32_t PackAttrMask{0x0000'0008};
  static constexpr uint32_t PackNumMask{0x0000'01F0};
};
/// A stack of values controlled by a family of MSVC-compatible pragmas.
/// Tracks a default value, the current value, and a stack of labeled save
/// slots for push/pop actions.
template<typename ValueType>
struct PragmaStack {
  /// One saved entry: the (possibly empty) label given to the push, the
  /// value saved at that point, and the relevant pragma source locations.
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    SourceLocation PragmaPushLocation;
    Slot(llvm::StringRef StackSlotLabel, ValueType Value,
         SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation),
          PragmaPushLocation(PragmaPushLocation) {}
  };

  /// Perform one pragma action on the stack:
  ///  - PSK_Reset restores the default value;
  ///  - PSK_Push saves the current value under StackSlotLabel;
  ///  - PSK_Pop restores from the labeled slot (discarding everything above
  ///    it), or from the top of the stack when no label is given;
  ///  - PSK_Set (alone or combined with push/pop) then installs Value.
  /// Note the order matters: push/pop happens before the PSK_Set bit is
  /// applied, which is how PSK_Push_Set / PSK_Pop_Set work.
  void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel, ValueType Value) {
    if (Action == PSK_Reset) {
      CurrentValue = DefaultValue;
      CurrentPragmaLocation = PragmaLocation;
      return;
    }
    if (Action & PSK_Push)
      Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
                         PragmaLocation);
    else if (Action & PSK_Pop) {
      if (!StackSlotLabel.empty()) {
        // If we've got a label, try to find it and jump there.
        auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
          return x.StackSlotLabel == StackSlotLabel;
        });
        // If we found the label, pop from there.
        if (I != Stack.rend()) {
          CurrentValue = I->Value;
          CurrentPragmaLocation = I->PragmaLocation;
          Stack.erase(std::prev(I.base()), Stack.end());
        }
      } else if (!Stack.empty()) {
        // We do not have a label, just pop the last entry.
        CurrentValue = Stack.back().Value;
        CurrentPragmaLocation = Stack.back().PragmaLocation;
        Stack.pop_back();
      }
    }
    if (Action & PSK_Set) {
      CurrentValue = Value;
      CurrentPragmaLocation = PragmaLocation;
    }
  }

  // MSVC seems to add artificial slots to #pragma stacks on entering a C++
  // method body to restore the stacks on exit, so it works like this:
  //
  //   struct S {
  //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
  //     void Method {}
  //     #pragma <name>(pop, InternalPragmaSlot)
  //   };
  //
  // It works even with #pragma vtordisp, although MSVC doesn't support
  //   #pragma vtordisp(push [, id], n)
  // syntax.
  //
  // Push / pop a named sentinel slot.
  void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
    assert((Action == PSK_Push || Action == PSK_Pop) &&
           "Can only push / pop #pragma stack sentinels!");
    Act(CurrentPragmaLocation, Action, Label, CurrentValue);
  }

  // Constructors.
  explicit PragmaStack(const ValueType &Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  /// True when the current value differs from the default, i.e. some pragma
  /// has changed it.
  bool hasValue() const { return CurrentValue != DefaultValue; }

  SmallVector<Slot, 2> Stack;
  ValueType DefaultValue; // Value used for PSK_Reset action.
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
  // Current floating-point pragma overrides: the top of the FP pragma stack
  // if any pragma has changed it, otherwise the default (empty) override set.
  return FpPragmaStack.hasValue() ? FpPragmaStack.CurrentValue
                                  : FPOptionsOverride();
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
  /// When ShouldAct is true, the constructor pushes a sentinel slot named
  /// SlotLabel and the destructor pops it again; both are defined out of
  /// line (presumably via PragmaStack::SentinelAction — confirm in the
  /// implementation file).
  PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
  ~PragmaStackSentinelRAII();

private:
  Sema &S;             // owning Sema whose pragma stacks are affected
  StringRef SlotLabel; // label used for the sentinel push/pop pair
  bool ShouldAct;      // whether the constructor/destructor do anything
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo *> getFunctionScopes() const {
  // Only the scopes at or above FunctionScopesStart belong to the current
  // context; earlier entries come from enclosing contexts.
  auto *First = FunctionScopes.begin() + FunctionScopesStart;
  return llvm::makeArrayRef(First, FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
  // Skip the infos that belong to enclosing contexts.
  auto *First = InventedParameterInfos.begin() + InventedParameterInfosStart;
  return llvm::makeArrayRef(First, InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
/// Lazily-deserialized vector of delegating constructors, backed by the
/// external Sema source (e.g. a PCH or module file).
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
/// Map from a function whose templated body was deferred ("late parsed")
/// to the saved state needed to parse that body later.
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
/// Callback to the parser to release any late-template-parsing state.
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
/// Opaque parser handle forwarded back through the callbacks above.
void *OpaqueParser;
/// Register the parser callbacks used for late template parsing.
///
/// \param LTP callback that parses one late-parsed template body.
/// \param LTPCleanup callback that frees late-parsing state.
/// \param P opaque parser handle passed to both callbacks.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
// Does the work necessary to deal with a SYCL kernel lambda. At the moment,
// this just marks the list of lambdas required to name the kernel.
void AddSYCLKernelLambda(const FunctionDecl *FD);
// Forward declaration so DelayedDiagnosticsState can befriend it.
class DelayedDiagnostics;
/// Opaque saved state returned by DelayedDiagnostics::push*() and
/// consumed by the matching pop*(); only DelayedDiagnostics itself can
/// read or write the saved pool pointer.
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
// Aliases used to document *why* a state was saved at particular call sites.
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// Encapsulates the bookkeeping needed to postpone diagnostics (or to
/// suspend such postponement) while declarations are being parsed or
/// otherwise processed.
class DelayedDiagnostics {
/// Pool that currently receives delayed diagnostics; null when
/// diagnostics are emitted immediately rather than delayed.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Record one delayed diagnostic in the current pool.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// True when there is an active pool to collect into.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// The pool currently collecting delayed diagnostics, if any.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Begin collecting access and deprecation diagnostics into \p pool,
/// remembering the previously active pool so it can be restored later.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState Saved;
Saved.SavedPool = CurPool;
CurPool = &pool;
return Saved;
}
/// Restore the state captured by a matching push(), without emitting
/// anything from the pool being abandoned. This is the bookkeeping half
/// of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Begin a region in which access and deprecation diagnostics are not
/// delayed at all.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState Saved;
Saved.SavedPool = CurPool;
CurPool = nullptr;
return Saved;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
// The context active before the push; nulled out once pop() has run,
// which makes pop() safe to call more than once.
DeclContext *SavedContext;
// Delayed-diagnostics state saved by pushUndelayed() in the ctor.
ProcessingContextState SavedContextState;
// 'this' type override active before the push, restored on pop().
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
/// Make \p ContextToPush the current DeclContext.
///
/// \param NewThisContext when true (the default), clears any
/// CXXThisTypeOverride because the pushed context has its own 'this'.
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
/// Restore all saved state; subsequent calls are no-ops.
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
// NOTE(review): unlike RebuildingImmediateInvocation above this has no
// in-class initializer -- presumably it is initialized by the Sema
// constructor; confirm before relying on its initial value.
bool isConstantEvaluatedOverride;
/// True when the innermost expression evaluation context is
/// constant-evaluated, or when the override flag above is set.
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
// Restores the previous DeclContext when this scope is destroyed.
Sema::ContextRAII SavedContext;
// Whether addContextNote() pushed a code-synthesis context that the
// destructor must pop.
bool PushedCodeSynthesisContext = false;
public:
/// Enter \p DC (a FunctionDecl or ObjCMethodDecl) and set up a fresh
/// function scope plus a potentially-evaluated expression context.
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
/// Push a "while defining the synthesized function" diagnostic note,
/// anchored at \p UseLoc. May be called at most once per scope.
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
// Tears down state in the reverse order it was set up in the ctor.
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
/// Maps identifiers to the declarations they currently resolve to.
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// A candidate for an immediate invocation: the ConstantExpr plus one
/// spare bit of state used by the immediate-invocation machinery.
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
/// Maybe-ODR-used expressions saved from the enclosing context.
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
/// Expressions whose dereference may warrant a -Wnoderef-style check.
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
/// True for any of the unevaluated context kinds.
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
/// True only for the ConstantEvaluated context kind.
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Saves the floating-point feature state (CurFPFeatures) and the FP
/// pragma override stack value on entry to a compound statement, and
/// restores both on exit.
class FPFeaturesStateRAII {
public:
/// Capture the current FP state and pragma overrides from \p S.
FPFeaturesStateRAII(Sema &S)
: S(S), OldFPFeaturesState(S.CurFPFeatures),
OldOverrides(S.FpPragmaStack.CurrentValue) {}
/// Restore both the FP feature state and the pragma stack value.
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
/// The pragma overrides that were active when this scope was entered.
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
/// Declare an implicit typedef named \p Name for type \p T.
void addImplicitTypedef(StringRef Name, QualType T);
/// Whether we have already warned about nearing stack exhaustion
/// (the warning is emitted at most once).
bool WarnedStackExhausted = false;
/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
/// This virtual key function only exists to limit the emission of debug info
/// describing the Sema class. GCC and Clang only emit debug info for a class
/// with a vtable when the vtable is emitted. Sema is final and not
/// polymorphic, but the debug info size savings are so significant that it is
/// worth adding a vtable just to take advantage of this optimization.
virtual void anchor();
// Simple accessors for the major Sema collaborators.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
/// Print out statistics about the semantic analysis.
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
// in that case anyway.
ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;
~ImmediateDiagBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First clear the diagnostic
// builder itself so it won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template <typename T>
friend const ImmediateDiagBuilder &
operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const ImmediateDiagBuilder &operator<<(T &&V) const {
const DiagnosticBuilder &BaseDiag = *this;
BaseDiag << std::move(V);
return *this;
}
};
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
~SemaDiagnosticBuilder();
bool isImmediate() const { return ImmediateDiag.hasValue(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }
// Stream a value into either the immediate diagnostic or the deferred
// partial diagnostic, whichever this builder holds.
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
if (ImmediateDiag.hasValue())
*ImmediateDiag << std::move(V);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
return *this;
}
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
if (Diag.ImmediateDiag.hasValue())
PD.Emit(*Diag.ImmediateDiag);
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
return Diag;
}
void AddFixItHint(const FixItHint &Hint) const {
if (ImmediateDiag.hasValue())
ImmediateDiag->AddFixItHint(Hint);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
}
// Allow `return ExprError(S.Diag(...) << ...);` style call sites.
friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
return ExprError();
}
friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
return StmtError();
}
operator ExprResult() const { return ExprError(); }
operator StmtResult() const { return StmtError(); }
operator TypeResult() const { return TypeError(); }
operator DeclResult() const { return DeclResult(true); }
operator MemInitResult() const { return MemInitResult(true); }
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Is the last error level diagnostic immediate. This is used to determine
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;
/// Scoped override of Sema::DeferDiags: sets it on construction and
/// restores the previous value on destruction.
class DeferDiagsRAII {
Sema &S;
/// Value of S.DeferDiags before this scope was entered.
bool PrevDeferDiags = false;
public:
DeferDiagsRAII(Sema &S, bool DeferDiags)
: S(S), PrevDeferDiags(S.DeferDiags) {
S.DeferDiags = DeferDiags;
}
~DeferDiagsRAII() { S.DeferDiags = PrevDeferDiags; }
};
/// Whether uncompilable error has occurred. This includes error happens
/// in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
// Owning Sema; used by operator() to release the popped scope.
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
/// Dispose of \p Scope (defined out of line).
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Return the innermost active function scope, or null if no function
/// scope has been pushed.
sema::FunctionScopeInfo *getCurFunction() const {
if (FunctionScopes.empty())
return nullptr;
return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() = default;
/// Emit the diagnostic for type \p T at location \p Loc.
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() = default;
};
// Overload set that normalizes diagnostic arguments into a form that can
// be streamed into a diagnostic builder (used by BoundTypeDiagnoser).
// Most overloads are identity; SourceLocation and Expr/TypeLoc are mapped
// to SourceRanges.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser that emits a fixed diagnostic ID with a bound set of
/// extra arguments, followed by the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
// Diagnostic to emit; must be nonzero (asserted in the constructor).
unsigned DiagID;
// Extra arguments stored by reference: the caller must keep them alive
// for the lifetime of this diagnoser.
std::tuple<const Ts &...> Args;
/// Stream each bound argument into \p DB, in tuple order, using the
/// index-sequence pack-expansion idiom.
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
/// Emit the diagnostic: bound arguments first, then the type \p T.
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
/// Emit the diagnostic: bound arguments, then a 0/1 flag indicating
/// whether \p T is sizeless, then the type itself.
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
// Location at which this module scope began.
SourceLocation BeginLoc;
// The module being parsed in this scope, if any.
clang::Module *Module = nullptr;
// Whether this is a module interface unit -- NOTE(review): inferred from
// the name; confirm against the code that populates ModuleScopes.
bool ModuleInterface = false;
// Whether the global module fragment was introduced implicitly -- TODO
// confirm against the code that populates ModuleScopes.
bool ImplicitGlobalModuleFragment = false;
// Visible-module state saved from the enclosing scope -- presumably
// restored when this scope is popped; verify at the pop site.
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
if (ModuleScopes.empty())
return nullptr;
return ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
// Fast path: the declaration is marked always-visible.
if (D->isUnconditionallyVisible())
return true;
// Otherwise fall back to the full visibility computation.
return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
// Fast path: the declaration itself is visible.
if (isVisible(D))
return true;
// Slow path: search other declarations of the entity.
return hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
/// Convenience overload: true if \p D has a visible definition; the
/// suggested declaration produced by the main overload is discarded.
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Return true if \p T is a complete type (under the rules selected by
/// \p Kind), without emitting any diagnostics.
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType getDecltypeForParenthesizedExpr(Expr *E);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Result of deciding whether a definition's body can be skipped, plus the
/// previous and new declarations involved in that decision.
struct SkipBodyInfo {
SkipBodyInfo() = default;
bool ShouldSkip = false;
bool CheckSameAsPrevious = false;
NamedDecl *Previous = nullptr;
NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
// Discriminator for the union below; see NameClassificationKind.
NameClassificationKind Kind;
// Payload; which member is active is determined solely by Kind.
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
// Private: payload-less construction, used by the named factories.
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
// Keyword classification carries no payload; the identifier is unused.
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
// Accessors assert that the requested payload matches Kind.
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
/// Map the template classifications onto TemplateNameKind; asserts on
/// any non-template classification.
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
// Template syntax only exists in C++.
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
// Non-dependent references: plausible template-names only if they carry
// no explicit template arguments already.
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
// From here on any match is a dependent reference; the flag is set even
// on the final fall-through return.
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
void warnOnReservedIdentifier(const NamedDecl *D);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
QualType &T, SourceLocation Loc,
unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
// Cast-alignment diagnostics for a cast of Op to type T.
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
// Assign numbering to anonymous tags declared in the given scope.
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
// Declarator processing: build the NamedDecl for each declarator form
// (typedef, variable, decomposition, function).
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
// Deduce the declared type of a variable with a deduced type (e.g. 'auto')
// from its initializer; returns true on error.
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
// How CheckConstexprFunctionDefinition should report problems it finds.
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
// Diagnostics for virtual methods hidden by overloads in a derived class.
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
// Checks specific to program entry points ('main' and MSVC CRT entry points).
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
// Parameter declarations and their default arguments.
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
// Records a default argument whose parsing was delayed (e.g. in a class body).
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion. They form a bitmask
// (power-of-two values) and may be OR'd together.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
// Initializers and special declaration forms.
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
// Pure-specifier ('= 0') handling.
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
// '= delete' and '= default' specifiers.
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
// K&R-style (identifier-list) parameter declarations following a declarator.
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
// Function-definition entry points for the parser.
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
// Trailing requires-clause handling for function declarators.
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Returns true when \p D is a non-null Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
// Compute named-return-value-optimization candidates for the function body.
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
// File-scope 'asm("...")' declaration.
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
// The flavor of module-declaration being processed by ActOnModuleDecl.
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
// C++ 'export { ... }' declaration blocks.
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  // Delegate to the static overload, using this semantic analyzer's AST
  // context and preprocessor.
  PrintingPolicy Policy = getPrintingPolicy(Context, PP);
  return Policy;
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
// A decl-specifier-seq with no declarator, e.g. 'struct S;'.
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
// Anonymous struct/union handling (standard C++ and Microsoft C extension).
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
// How a tag name is being used at the point of a tag-related action.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
// Main entry point for tag (struct/class/union/enum) processing.
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
// Field (data member) declarations, including MS __declspec(property).
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
// Whether the "trivial_abi" attribute should be considered when determining
// the triviality of a special member.
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
// At most one of these is "active": either a C++ special member kind or a
// defaulted comparison kind; the other holds its sentinel value
// (CXXInvalid / DefaultedComparisonKind::None).
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
// True if this describes any defaulted function at all.
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
// Special members occupy [0, CXXInvalid); comparison kinds follow, offset
// by CXXInvalid (SpecialMember == CXXInvalid whenever Comparison is set).
return SpecialMember + (unsigned)Comparison;
}
};
/// Determine what kind of defaulted function (special member or defaulted
/// comparison) the given function declaration represents, if any.
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Returns the C++ special-member kind of \p MD, or CXXInvalid if it is not
/// a special member.
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
  DefaultedFunctionKind DFK = getDefaultedFunctionKind(MD);
  return DFK.asSpecialMember();
}
/// Returns the defaulted-comparison kind of \p FD, or
/// DefaultedComparisonKind::None if it is not a defaulted comparison.
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
  DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD);
  return DFK.asComparison();
}
// Objective-C instance-variable (ivar) declarations.
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
// Opaque token returned/accepted by the skipped-definition callbacks below.
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
bool IsAbstract,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
// Enumeration body processing: constants, underlying type, redeclarations.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a
/// particular DeclContext. Causes lookup within the scope to chain through
/// enclosing contexts in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
/// Merge availability attributes for an implementation of
/// an optional protocol requirement.
AMK_OptionalProtocolImplementation
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities deteremines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final prirority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final vaue.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
// Merge all attributes of Old onto the redeclaration/override New.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
// Declaration-merging entry points for the various declaration kinds.
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
// Determine whether New is an overload of, or conflicts with, OldDecls.
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
// Which 'explicit' functions may participate in an implicit conversion.
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
// Attempt the implicit conversion sequence from 'From' to 'ToType'.
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
// Classification predicates for the standard conversion categories.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
// Copy-initialization of an entity from an initializer expression.
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
// Initialize the implicit object argument ('this') for a member call.
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
// Contextual conversions required by particular language constructs.
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
///
/// Subclasses override match() to select acceptable destination types and
/// the diagnose*/note* hooks to emit context-specific diagnostics.
class ContextualImplicitConverter {
public:
/// When true, no diagnostics are emitted (see the diagnose* hooks below).
bool Suppress;
/// When true, the "conversion picked" diagnostic (diagnoseConversion) is
/// suppressed.
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
/// Contextual converter that accepts integral and (optionally scoped)
/// enumeration types, for integral-constant-expression contexts.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
/// Whether scoped enumeration types also satisfy match().
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
// Forward the generic "no match" hook to the more specific diagnostic.
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// The kind of container an Objective-C subscript expression refers to.
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
/// Whether a call expression's callee was found via ADL.
using ADLCallKind = CallExpr::ADLCallKind;
// Overload candidate construction: the Add*Candidate(s) routines below
// populate an OverloadCandidateSet with candidates of the various kinds
// (free functions, methods, templates) for later overload resolution.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
// Candidate construction for conversion functions, surrogate calls,
// operators, built-in operator candidates, and ADL candidates.
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate.
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified
// by the expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// Strip wrappers from a (possibly) function type down to the bare
// function type, or return the type unchanged if it is not function-like:
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
/// Resolve an address-of-overloaded-function expression against the
/// target type; returns the selected function or null.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
/// Resolve SrcExpr, which must name a single function template
/// specialization, and fix up the expression in place; optionally emits
/// diagnostics (Complain) using the given range/type/diagnostic ID.
/// Returns true on success.
// NOTE: parameter name fixed from the misspelled "DoFunctionPointerConverion"
// to match the sibling declaration above; parameter names in declarations
// are not part of the call interface.
bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr,
    bool DoFunctionPointerConversion = false,
    bool Complain = false,
    SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);
/// Rewrite a reference to an overloaded function to refer to the
/// specific function Fn that was selected.
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
/// Build the begin()/end() calls for a range-based for loop over Range.
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
bool AllowRecovery = false);
/// Build a call to an object of class type (via its operator() or a
/// surrogate conversion function).
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
/// Select the redeclaration-lookup kind appropriate for declarations
/// introduced in the current context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  const Decl *D = cast<Decl>(CurContext);
  if (D->getOwningModuleForLinkage(/*IgnoreLinkage=*/true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplatePack,
};
/// Look up the special member (constructor, destructor, assignment
/// operator) of class D matching the given qualifiers.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
/// Callback invoked to emit the diagnostic for a delayed typo.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
/// Callback invoked to build the recovery expression for a corrected typo.
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// Per-TypoExpr bookkeeping: the correction consumer plus the diagnostic
/// and recovery callbacks. Move-only (move ctor/assignment only).
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
/// Perform unqualified name lookup.
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
/// Perform qualified name lookup into the given declaration context.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
// Lookups for special member functions of a class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
/// Perform argument-dependent lookup for the given name and arguments.
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
/// The mode in which typo correction is being performed.
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
/// Try to correct a typo in the given name, performing the lookup
/// immediately and returning the best correction found (if any).
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
/// Like CorrectTypo, but produces a TypoExpr placeholder whose correction
/// and diagnostics are deferred via the TDG/TRC callbacks.
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload of CorrectDelayedTyposInExpr for an ExprResult:
/// invalid results are propagated unchanged, otherwise the Expr* overload
/// is used with identical arguments.
ExprResult CorrectDelayedTyposInExpr(
    ExprResult ER, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  if (ER.isInvalid())
    return ER;
  return CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos,
                                   Filter);
}
/// Emit the diagnostic (and, with the second overload, a follow-up note)
/// for a typo correction, optionally applying error recovery.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
/// Compute the namespaces and classes associated with the argument types,
/// for argument-dependent lookup (C++ [basic.lookup.argdep]).
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
/// Implicitly declare a function named II at Loc (pre-C99 C semantics).
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top-level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesWithRange &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record that typo correction failed for \p Typo at \p TypoLoc (unless
/// \p RecordFailure is false) and hand back an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (!RecordFailure)
    return TypoCorrection();
  // Remember this (identifier, location) pair so we don't retry it later.
  TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
/// Forwards to the private AddMethodToGlobalPool with instance=true.
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
/// Forwards to the private AddMethodToGlobalPool with instance=false.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
/// Thin wrapper over the private LookupMethodInGlobalPool with
/// instance=true.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
/// Thin wrapper over the private LookupMethodInGlobalPool with
/// instance=false.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// A small value wrapper around an expression that has been processed as a
/// full-expression. Instances with a non-null expression are created only
/// through Sema::MakeFullExpr and friends (see the private constructor).
class FullExprArg {
public:
  FullExprArg() : E(nullptr) {}
  FullExprArg(Sema &actions) : E(nullptr) {}

  /// Access the wrapped expression (may be null).
  Expr *get() const { return E; }
  Expr *operator->() { return E; }

  /// Hand the wrapped expression back out as an ExprResult.
  ExprResult release() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};
/// Wrap \p Arg as a full-expression, using the expression's own location
/// for diagnostics (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  SourceLocation DiagLoc;
  if (Arg)
    DiagLoc = Arg->getExprLoc();
  return MakeFullExpr(Arg, DiagLoc);
}
/// Wrap \p Arg as a full-expression whose value is used, reporting
/// diagnostics at \p CC.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  ExprResult FE = ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false);
  return FullExprArg(FE.get());
}
/// Wrap \p Arg as a full-expression whose value is discarded, reporting
/// diagnostics at the expression's own location (invalid if null).
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true)
          .get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
/// Calls ActOnStartOfCompoundStmt on construction and the matching
/// ActOnFinishOfCompoundStmt on destruction, so the scope is balanced on
/// every exit path.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops a function scope on exit.
/// Call disable() to keep the scope alive past this object's lifetime
/// (e.g. when ownership of the scope has been transferred elsewhere).
struct FunctionScopeRAII {
Sema &S;
bool Active; // When false, the destructor is a no-op.
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Describes whether the named variable behind a return statement is
/// eligible for implicit move and/or copy elision (NRVO).
struct NamedReturnInfo {
  /// The variable named by the return expression, when one was identified.
  const VarDecl *Candidate;

  enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
  Status S;

  /// True if the return expression may be treated as an rvalue
  /// (implicit move).
  bool isMoveEligible() const { return S != None; }
  /// True if, additionally, the copy may be elided entirely (NRVO).
  bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
NamedReturnInfo getNamedReturnInfo(Expr *&E, bool ForceCXX2b = false);
NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
QualType ReturnType);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const NamedReturnInfo &NRInfo,
Expr *Value);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
NamedReturnInfo &NRInfo);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// If VD is set but not otherwise used, diagnose, for a parameter or a
/// variable.
void DiagnoseUnusedButSetDecl(const VarDecl *VD);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Begin a region in which availability/deprecation diagnostics are
/// delayed into \p pool; the returned state is passed back to
/// PopParsingDeclaration when the declaration has been parsed.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Note that we have entered a class definition being parsed; increments
/// the nesting depth and suspends delayed diagnostics until the matching
/// PopParsingClass.
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
/// Leave a class definition begun with PushParsingClass, restoring the
/// delayed-diagnostic state captured at entry.
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
ParsedType ParsedTy);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
// Identifier naming the declared iterator variable; null until parsed.
IdentifierInfo *DeclIdent = nullptr;
// Location of the iterator variable's identifier.
SourceLocation DeclIdentLoc;
// Parsed type of the iterator variable.
ParsedType Type;
// The begin:end[:step] range expressions for this iterator.
OMPIteratorExpr::IteratorRange Range;
// Location of the '=' between the declarator and the range.
SourceLocation AssignLoc;
// Location of the first ':' in the range.
SourceLocation ColonLoc;
// Location of the second ':' (the optional step separator), if present.
SourceLocation SecColonLoc;
};
// Builds an OMPIteratorExpr from the parsed iterator definitions in \p Data.
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
// Scope in which the original member access appeared.
Scope *S;
// The member name as originally written (reused verbatim on reinvocation).
UnqualifiedId &Id;
// Objective-C implementation decl context of the access, if any —
// forwarded unchanged to ActOnMemberAccessExpr.
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
MultiExprArg CallArgs);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
// One designator component of a __builtin_offsetof expression: either a
// member access (.ident) or an array subscript ([expr]).
struct OffsetOfComponent {
// Source range covering this single component.
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
// Active member is selected by isBrackets: E when true, IdentInfo when
// false.
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
///
/// Returned by the CheckMicrosoftIfExistsSymbol overloads below when
/// evaluating the Microsoft __if_exists / __if_not_exists extension.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Describes why a comparison category type (e.g. std::strong_ordering)
/// is being looked up; consumed by CheckComparisonCategoryType below.
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc,
const LookupResult *R = nullptr,
const UsingDecl *UD = nullptr);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation,
bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
// The most-restrictive specification consistent with everything seen so far.
ExceptionSpecificationType ComputedEST;
// Set of exception types already recorded — presumably used by
// CalledDecl/CalledStmt to avoid duplicates in Exceptions (TODO: confirm;
// their definitions are not visible here).
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
// Ordered list of exception types for a throw(...) specification.
SmallVector<QualType, 4> Exceptions;
// Drop all collected exception types (both the set and the ordered list).
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Starts from the most restrictive specification: noexcept in C++11 and
// later, throw() (EST_DynamicNone) otherwise.
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
// "No specification" is materialized as an explicit noexcept(false)
// with a synthesized 'false' literal as its operand.
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
/// NOTE(review): defined out of line; presumably restores the saved
/// OldCXXThisTypeOverride on scope exit — confirm in the implementation.
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if an error occurred (capture failed), false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions (operator new and
/// operator delete) when processing a C++ 'new' or 'delete' expression.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Finish a full-expression, using the expression's own source location
/// (when available) as the point of completion. Forwards to the overload
/// that takes an explicit SourceLocation.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation CC = Expr ? Expr->getExprLoc() : SourceLocation();
  return ActOnFinishFullExpr(Expr, CC, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
CXXScopeSpec *SS = nullptr);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
/// As above, but takes an already-resolved QualType and wraps it as the
/// object type via ParsedType::make.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Anything other than a plain copy-init ("[x = e]") is direct-init.
  const bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureType = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id, DirectInit, Init);
  return ParsedType::make(CaptureType);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// The outcome of a C++ access-control check.
enum AccessResult {
AR_accessible,   ///< Access is permitted.
AR_inaccessible, ///< Access is not permitted.
AR_dependent,    ///< Outcome depends on a dependent context; handled later via
                 ///< HandleDependentAccessCheck.
AR_delayed       ///< Check was deferred; handled later via
                 ///< HandleDelayedAccessCheck.
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
/// Convenience overload of isMemberAccessibleForDeletion that performs the
/// check silently: no diagnostic location or message is supplied, so the
/// result is queried without emitting anything.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found,
                                   QualType ObjectType) {
  return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
                                       /*Loc=*/SourceLocation(),
                                       /*Diag=*/PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selects, for diagnostic wording, the context in which an abstract class
/// type was used (see RequireNonAbstractType / DiagnoseAbstractType).
enum AbstractDiagSelID {
AbstractNone = -1,           ///< No specific context.
AbstractReturnType,          ///< Abstract type used as a return type.
AbstractParamType,           ///< Abstract type used as a parameter type.
AbstractVariableType,        ///< Abstract type used as a variable's type.
AbstractFieldType,           ///< Abstract type used as a field's type.
AbstractIvarType,            ///< Abstract type used as an Objective-C ivar type.
AbstractSynthesizedIvarType, ///< Abstract type used as a synthesized ivar type.
AbstractArrayType            ///< Abstract type used as an array element type.
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Convenience wrapper around RequireNonAbstractType(Loc, T, TypeDiagnoser&):
/// binds a diagnostic ID together with its arguments into a
/// BoundTypeDiagnoser and forwards to the non-template overload.
///
/// \param Loc    Location at which to diagnose, if needed.
/// \param T      The type required to be non-abstract.
/// \param DiagID Diagnostic to emit when \p T is abstract.
/// \param Args   Extra arguments for the diagnostic.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Bound(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Bound);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
  /// Conditionally required: a template name is required exactly when
  /// \p TemplateKWLoc is a valid location (i.e. a 'template' keyword was
  /// written).
  RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
      : TemplateKW(TemplateKWLoc) {}
  /// Unconditionally required, independent of any 'template' keyword.
  RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}

  /// Location of the 'template' keyword, or an invalid location if there was
  /// none (including the unconditionally-required case).
  SourceLocation getTemplateKeywordLoc() const {
    return TemplateKW ? *TemplateKW : SourceLocation();
  }
  /// True if an actual 'template' keyword location was recorded.
  bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
  /// True if a template name is required: either unconditionally (no stored
  /// location) or because the stored keyword location is valid.
  bool isRequired() const { return TemplateKW != SourceLocation(); }
  explicit operator bool() const { return isRequired(); }

private:
  /// None => unconditionally required; otherwise required iff the stored
  /// location is valid.
  llvm::Optional<SourceLocation> TemplateKW;
};
/// How (or whether) a looked-up name came to be assumed to be a template
/// name; produced by LookupTemplateName via its ATK out-parameter.
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool BuildTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc,
bool AllowUnexpandedPack);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,                    ///< Class template.
TPC_VarTemplate,                      ///< Variable template.
TPC_FunctionTemplate,                 ///< Function template.
TPC_ClassTemplateMember,              ///< Member of a class template.
TPC_FriendClassTemplate,              ///< Friend class template.
TPC_FriendFunctionTemplate,           ///< Friend function template declaration.
TPC_FriendFunctionTemplateDefinition, ///< Friend function template definition.
TPC_TypeAliasTemplate                 ///< Alias template.
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
/// Passed as the CTAK parameter of CheckTemplateArgument to select
/// context-dependent checking rules.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
/// Passed to TemplateParameterListsAreEqual to select which matching rules
/// apply to the comparison.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
/// Do not reorder or renumber without updating that diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
/// A requires-clause.
UPPC_RequiresClause,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType; ///< Parameter type the argument was deduced against.
bool DecomposedParam; ///< Presumably whether the parameter was decomposed for deduction -- TODO confirm against callers.
unsigned ArgIdx; ///< Index of the call argument this record refers to.
QualType OriginalArgType; ///< Type of the original call argument.
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
/// Deduction succeeded.
DAR_Succeeded,
/// Deduction failed; no diagnostic has been emitted yet.
DAR_Failed,
/// Deduction failed and a diagnostic has already been emitted.
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Convenience overload: forwards to the static
/// MarkDeducedTemplateParameters(), supplying this semantic analysis
/// object's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
/// Return the template arguments being substituted as an array.
///
/// Invalid for DeclaringSpecialMember contexts, where the union member
/// in use is SpecialMember rather than NumTemplateArgs.
ArrayRef<TemplateArgument> template_arguments() const {
  assert(Kind != DeclaringSpecialMember);
  return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
/// Construct a context of kind TemplateInstantiation with all pointer
/// members null and no template arguments; the remaining fields
/// (PointOfInstantiation, InstantiationRange, DeductionInfo) are filled
/// in by whoever pushes the context.
CodeSynthesisContext()
    : Kind(TemplateInstantiation),
      SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
      Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
      DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
/// RAII helper that installs a new argument-pack substitution index on a
/// \c Sema object and restores the previous index when it goes out of scope.
///
/// See \c ArgumentPackSubstitutionIndex for the meaning of the index.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int SavedSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self),
        SavedSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = SavedSubstitutionIndex;
  }
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  /// Tag type selecting the exception-specification overload below.
  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraints-checking overloads below.
  struct ConstraintsCheck {};
  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-substitution overload below.
  struct ConstraintSubstitution {};
  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-normalization overload below.
  struct ConstraintNormalization {};
  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// Tag type selecting the parameter-mapping overload below.
  struct ParameterMappingSubstitution {};
  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;              // See isInvalid(): depth limit exceeded.
  bool AlreadyInstantiating; // See isAlreadyInstantiating().

  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  /// General constructor taking an explicit synthesis kind; presumably the
  /// delegation target of the public overloads above (defined out of line).
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: each object corresponds to exactly one stack entry.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
/// Determine whether we are currently performing template instantiation,
/// i.e. whether the synthesis-context stack holds at least one entry that
/// is a real instantiation rather than a non-instantiation record.
bool inTemplateInstantiation() const {
  const auto Depth = CodeSynthesisContexts.size();
  return Depth > NonInstantiationEntries;
}
/// Print the active code-synthesis context stack, if it has not already
/// been printed at the current depth, followed by the #pragma clang
/// attribute instantiation point when one is active.
void PrintContextStack() {
  const auto Depth = CodeSynthesisContexts.size();
  // Suppress reprinting the identical stack for consecutive diagnostics
  // emitted at the same synthesis depth.
  if (Depth != 0 && Depth != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = Depth;
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
/// Determines whether the innermost expression evaluation context is
/// unevaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &Innermost = ExprEvalContexts.back();
  return Innermost.isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // Snapshot of Sema and diagnostics state, taken in the constructor's
  // initializer list (before the body mutates anything) and restored in
  // the destructor.
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;
public:
  /// \param AccessCheckingSFINAE value installed into
  ///        SemaRef.AccessCheckingSFINAE for the lifetime of the trap.
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
    : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
      PrevInNonInstantiationSFINAEContext(
                                    SemaRef.InNonInstantiationSFINAEContext),
      PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
      PrevLastDiagnosticIgnored(
          SemaRef.getDiagnostics().isLastDiagnosticIgnored())
  {
    // If there is no enclosing SFINAE context, mark this as a
    // non-instantiation SFINAE context while the trap is active.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore every piece of state saved in the constructor.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool SavedDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        SavedDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }

  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = SavedDisableTypoCorrection;
  }
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that, when enabled, gives the enclosed region its own fresh
/// queues of pending implicit instantiations and vtable uses, restoring
/// (or, in the PCH case, merging back) the outer queues on destruction.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    // Steal the outer pending work so the region starts with empty queues.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  /// Flush the work accumulated inside this scope: define the used
  /// vtables and perform the pending implicit instantiations.
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  // Infos grows one entry per parameter index; gaps are filled with
  // default-constructed infos by set().
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  // True once any entry differs from the default ExtParameterInfo; if all
  // entries are default, getPointerOrNull() returns nullptr.
  bool HasInteresting = false;
public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be set in strictly increasing order (see the assert).
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or nullptr if every entry is
  /// the default.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute from a template pattern whose instantiation is deferred,
/// together with the local instantiation scope and the declaration it
/// will eventually be attached to.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;           // Attribute in the template pattern.
  LocalInstantiationScope *Scope; // Scope active when it was encountered.
  Decl *NewDecl;                  // Instantiated declaration to annotate.

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D) {}
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.

/// Kinds of Objective-C container declarations (@interface, @protocol,
/// categories, extensions, and their implementations); OCK_None means
/// no container. See getObjCContainerKind().
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
///
/// On return, exactly one of the out-parameter groups (\p typeArgs... or
/// \p protocols...) is populated, depending on how the identifiers resolved.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
/// Diagnose a mismatch between \p Property and the property it overrides
/// or redeclares (\p SuperProperty).
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
/// Diagnose methods duplicated between a class extension \p CAT and its
/// primary class \p ID.
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
/// Called at the @end of an ObjC container with all of its methods and
/// top-level variable declarations.
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
/// Called on a parsed @property declaration.
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// Called on a parsed @synthesize/@dynamic property implementation.
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Kinds of Objective-C methods with special ownership/initialization
/// semantics (alloc/new/copy/init families).
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// Per-argument information for an Objective-C method declaration,
/// collected by the parser and consumed by ActOnMethodDeclaration.
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
/// Called on a parsed Objective-C method declaration or definition.
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
/// Look up \p Sel in the protocols qualifying the object pointer type \p OPT.
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
/// Look up \p Sel in the Objective-C object type \p Ty.
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
/// Build a property reference expression (obj.prop) on an ObjC object
/// pointer base; the Super parameters are used for 'super.prop'.
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
/// Build a class property reference expression (ClassName.prop).
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
/// Classify a message send whose receiver is the identifier \p Name; on a
/// class message, \p ReceiverType receives the resolved type.
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
/// Called on a message send to 'super'.
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
/// Build a class message send; \p isImplicit marks sends synthesized by
/// Sema rather than written in source.
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
/// Called on a parsed class message send ([ClassName sel ...]).
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
/// Build an instance message send; \p isImplicit marks sends synthesized by
/// Sema rather than written in source.
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
/// Called on a parsed instance message send ([expr sel ...]).
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
/// Build an ARC bridged cast expression (__bridge and friends).
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
/// Called on a parsed ARC bridged cast.
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
/// Diagnostics for casts between toll-free-bridged or objc_bridge_related
/// CF/ObjC types.
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
/// Find the class and conversion methods relating \p SrcType and
/// \p DestType via the objc_bridge_related attribute; results are returned
/// through the reference out-parameters.
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
/// Alignment kinds accepted by '#pragma options align'.
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// When to diagnose a non-default alignment/pack state: on entry to an
/// include with state set, or on exit with state changed.
enum class PragmaAlignPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// Which MS segment pragma a section name was introduced by.
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
/// Check that \p SectionName is used with consistent flags; the two
/// overloads differ in whether the request came from a decl or a pragma.
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
/// Precise semantics hold exactly when none of the fast-math relaxations
/// (reassociation, no-signed-zeros, reciprocal, approximate functions) is
/// active in the current FP feature state.
bool isPreciseFPEnabled() {
  const bool AnyRelaxation = CurFPFeatures.getAllowFPReassociate() ||
                             CurFPFeatures.getNoSignedZero() ||
                             CurFPFeatures.getAllowReciprocal() ||
                             CurFPFeatures.getAllowApproxFunc();
  return !AnyRelaxation;
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
/// Clone \p ND under the name \p II for use by '\#pragma weak'.
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// Called on well-formed '\#pragma clang attribute push'.
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
/// Ownership convention family for *Consumed attributes: NS (ObjC),
/// CF (CoreFoundation), or OS (libkern).
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
// Opaque stack of data-sharing attributes; managed via the
// Init/DestroyDataSharingAttributesStack functions below.
void *VarDataSharingAttributesStack;
/// State for one (possibly nested) 'declare target' context.
struct DeclareTargetContextInfo {
struct MapInfo {
OMPDeclareTargetDeclAttr::MapTypeTy MT;
SourceLocation Loc;
};
/// Explicitly listed variables and functions in a 'to' or 'link' clause.
llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;
/// The 'device_type' as parsed from the clause.
OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
/// The directive location.
SourceLocation Loc;
DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
: Kind(Kind), Loc(Loc) {}
};
/// Stack of nested '#pragma omp declare target' directives; its size is the
/// current nesting depth.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
/// Verify \p Op is a (strictly) positive integer constant in clause
/// \p CKind, diagnosing unless \p SuppressExprDiags.
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true,
bool SuppressExprDiags = false);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting.
/// \param AStmt Associated statement of the transformation directive.
/// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested in \p NumLoops loop.
/// \param OriginalInits [out] Collection of statements and declarations that
/// must have been executed/declared before entering the
/// loop.
///
/// \return Whether there was any error.
bool checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
Stmt *&Body,
SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
&OriginalInits);
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo for the surrounding scope, if any.
/// Yields nullptr when no `omp begin/end declare variant` scope is active.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
  if (OMPDeclareVariantScopes.empty())
    return nullptr;
  return OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside of an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Can we exit an OpenMP declare variant scope at the moment.
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
/// Build the expression referencing the OpenMP-captured variable
/// \p Capture with the given value/object kind.
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Perform the implicit integer conversion required for an OpenMP clause
/// operand \p Op.
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<StringRef> Assumptions,
bool SkippedClauses);
/// Check if there is an active global `omp begin assumes` directive.
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Check if the variable \p VD may be referenced inside the current
/// 'omp declare mapper' construct.
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
/// Return the declared name of the mapper variable of the enclosing
/// '#pragma omp declare mapper' construct, if any.
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
/// Called once a target context is completed, that can be when a
/// '#pragma omp end declare target' was encountered or when a
/// '#pragma omp declare target' without declaration-definition-seq was
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region, i.e. while at least one
/// '#pragma omp declare target' context is still open (not yet closed by a
/// matching 'end declare target').
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
/// Generic entry point called for an executable OpenMP directive of kind
/// \p Kind after parsing of its clauses and associated statement; dispatches
/// to the directive-specific ActOnOpenMP*Directive handlers below.
/// \param CancelRegion Region kind a 'cancel'/'cancellation point' directive
/// applies to, if relevant.
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Generic handler for OpenMP clauses of kind \p Kind that carry a single
/// expression argument; dispatches to the clause-specific handlers below.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'full' clauses.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'partial' clauses.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Generic handler for OpenMP clauses of kind \p Kind that carry a single
/// enumeration-style argument (passed as \p Argument); dispatches to the
/// clause-specific handlers below.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Generic handler for OpenMP clauses of kind \p Kind that carry both
/// enumeration arguments (\p Arguments) and an expression (\p Expr), e.g.
/// 'schedule'; dispatches to the clause-specific handlers below.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Generic handler for OpenMP clauses of kind \p Kind that take no
/// argument (e.g. 'nowait', 'untied'); dispatches to the clause-specific
/// handlers below.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
bool IsTarget, bool IsTargetSync,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Generic handler for OpenMP clauses of kind \p Kind that take a list of
/// variables (\p Vars), e.g. 'private', 'map', 'reduction'; dispatches to
/// the clause-specific handlers below. The reduction/mapper and map/motion
/// modifier parameters are only meaningful for the corresponding clauses.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
// OpenMP clause semantic actions: each ActOnOpenMP*Clause callback is invoked
// by the parser on a syntactically well-formed clause and builds its semantic
// representation (an OMPClause node). StartLoc/LParenLoc/EndLoc are the
// clause's source positions; clauses taking a VarList receive the variable
// reference expressions named in the clause.
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for one entry of a 'uses_allocators' clause list: the allocator
/// expression, its optional traits expression, and the parenthesis
/// locations of the traits specification.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
///
/// Distinguishes an implicit conversion from the several forms of
/// explicitly-written casts; passed to conversion entry points such as
/// ImpCastExprToType and PerformImplicitConversion to select the
/// appropriate checking rules.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
/// Returns true when \p CCK corresponds to a cast that was explicitly
/// written in the source (C-style, functional, or other named cast),
/// as opposed to an implicit or builtin-operator conversion.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult
ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_PRValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
/// Variadic call through a function (or function pointer).
VariadicFunction,
/// Variadic call through a block.
VariadicBlock,
/// Variadic Objective-C method call.
VariadicMethod,
/// Variadic constructor call.
VariadicConstructor,
/// The call is not variadic.
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
/// The type may be passed to a variadic function.
VAK_Valid,
/// Passing the type is valid only from C++11 onwards.
VAK_ValidInCXX11,
/// Passing the type has undefined behavior.
VAK_Undefined,
// NOTE(review): presumably undefined behavior that MSVC nevertheless
// accepts — inferred from the name; confirm in isValidVarArgType's impl.
VAK_MSVCUndefined,
/// The type cannot be passed to a variadic function.
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
// Overloads of PerformImplicitConversion: convert \p From to \p ToType,
// given either just the assignment context, a full implicit conversion
// sequence, or a single standard conversion sequence.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType that works on
/// ExprResults: unwraps the operands, computes the composite pointer type
/// (possibly rewriting the operands when ConvertArgs is true), and stores
/// the possibly-updated expressions back into \p E1 and \p E2.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Composite = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  E1 = LHS;
  E2 = RHS;
  return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
// Predicates over vector/matrix types used by the cast and conversion
// checking code above.
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
///
/// \param callLoc the location of the call.
/// \param result the argument expression; replaced with the converted form.
/// \param paramType out-parameter receiving the resolved parameter type.
// Fix: the reference declarator was corrupted by an HTML-entity mangling
// ("&para;" rendered as 'pilcrow'), leaving "QualType <pilcrow>mType" which
// does not compile; restored to "QualType &paramType".
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions i.e. when they
// have the same number of rows and columns. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
CastKind &Kind);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
/// Outcome of an ARC conversion check: fine as-is, contains an unbridged
/// cast that still needs handling, or an error.
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of semantic analysis of a condition expression: bundles the
/// optional condition variable, the converted full-expression condition,
/// and — for constexpr conditions — its value when known at compile time.
class ConditionResult {
// The variable declared in the condition, if any (may be null).
Decl *ConditionVar;
// The converted, full-expression form of the condition.
FullExprArg Condition;
// True when this result represents an error.
bool Invalid;
// Whether the condition's value was evaluated at compile time (only
// attempted for constexpr, non-value-dependent conditions).
bool HasKnownValue;
// The evaluated value; meaningful only when HasKnownValue is true.
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
// Returns the condition variable (null if none) and the condition
// expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
// Returns the compile-time value of the condition, or None when it was
// not (or could not be) evaluated.
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// The returned ExprResult is invalid on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD; ///< The canonical function declaration.
SourceLocation Loc;                ///< The associated source location.
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder
diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID, FunctionDecl *FD);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
FunctionDecl *FD = nullptr);
/// Convenience overload taking a PartialDiagnostic: forwards to the
/// DiagID-based overload and streams the partial diagnostic's arguments
/// into the resulting builder.
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
/// Check if the expression is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,        ///< __device__ function.
CFT_Global,        ///< __global__ (kernel) function.
CFT_Host,          ///< __host__ function (also used for unattributed/null decls).
CFT_HostDevice,    ///< __host__ __device__ function.
CFT_InvalidTarget  ///< Invalid combination of target attributes.
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
// Note: the original trailing '///' comments would attach to the *next*
// enumerator in Doxygen; '///<' documents the member they follow.
enum CUDAVariableTarget {
CVT_Device,  ///< Emitted on device side with a shadow variable on host side
CVT_Host,    ///< Emitted on host side only
CVT_Both,    ///< Emitted on both sides with different addresses
CVT_Unified, ///< Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
// CurContext need not be a FunctionDecl (e.g. at file scope); dyn_cast
// then yields null and IdentifyCUDATarget returns CFT_Host for null.
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
// Values are compared numerically; keep ordered from worst to best.
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
// Any preference other than CFP_Never is allowed in a semantically
// correct program (CFP_WrongSide calls only if never codegen'ed).
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas by default is host device function unless it has explicit
/// host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p VD satisfy CUDA restrictions. In
/// case of error emits appropriate diagnostic and invalidates \p VD.
///
/// CUDA allows only empty constructors as initializers for global
/// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are implicitly
/// static in CUDA). One exception is that CUDA allows constant initializers
/// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
///
/// If the code completion point has been reached, also reports the function
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;    ///< Index of the format-string argument.
unsigned FirstDataArg; ///< Index of the first data argument.
bool HasVAListArg;     ///< Whether the function takes a va_list argument.
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinArithmeticFence(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
/// Kinds of format strings recognized by format-string checking, mirroring
/// the archetypes accepted by the format attribute.
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
/// The type associated with the magic value (see
/// RegisterTypeTagForDatatype).
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
/// If true, the tagged expression must be a null pointer
/// (NOTE(review): inferred from the MustBeNull parameter name at the
/// registration site — confirm at use sites).
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
/// Bump the MS mangling number by delegating to the parser's current scope.
void incrementMSManglingNumber() const {
  CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
/// The current lexical declaration context: OriginalLexicalContext when it
/// is set, otherwise CurContext.
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
/// Like getCurLexicalContext(), but when the context is an Objective-C
/// category, returns the category's class interface instead, since a
/// category implicitly has the attributes of its interface.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *Ctx = getCurLexicalContext();
  if (const auto *Category = dyn_cast<ObjCCategoryDecl>(Ctx))
    return Category->getClassInterface();
  return Ctx;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for device yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
///
/// Pushes an evaluation context onto the owning Sema on construction (unless
/// suppressed) and pops it in the destructor, so the previous context is
/// restored on every exit path.
class EnterExpressionEvaluationContext {
Sema &Actions;
// True only if the constructor actually pushed a context; the destructor
// pops if and only if this is set.
bool Entered = true;
public:
/// Enter \p NewContext, unless \p ShouldEnter is false.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
/// Enter \p NewContext, reusing the current lambda context declaration
/// (tag-dispatched via Sema::ReuseLambdaContextDecl_t).
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
/// Enter an UnevaluatedList context for a braced-init-list, but only when
/// we are currently in an unevaluated context and compiling C++11 or later;
/// otherwise this constructor is a no-op.
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
/// Pop the context pushed by the constructor, if any.
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
/// Build a DeductionFailureInfo record from a template deduction result and
/// its associated TemplateDeductionInfo, for later diagnostic reporting.
/// NOTE(review): declaration only; any ownership/allocation semantics are
/// established at the definition site -- confirm there.
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
/// The saved token stream of the template body, replayed when parsing
/// finally happens.
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
/// Explicit specialization of PragmaStack::Act for align/pack pragmas;
/// declared here, defined out of line.
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
// Reuse the canonical-decl-pointer traits for the FunctionDecl half of the
// key; the empty/tombstone markers for that half come from there.
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
/// Empty-bucket sentinel: base empty key paired with an invalid location.
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
/// Deleted-bucket sentinel: base tombstone key with an invalid location.
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
/// Combine the hashes of both components so equal keys hash equally.
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}
/// Keys are equal only when both the declaration and the location match.
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
main.c | /****************************************************
* Luis Humberto Sanchez Vaca
* A01638029
*
* Program to calculate the area under a curve
* with the trapezoidal rule using OpenMP
* Ex. sin(x) from 0 to 1
*
* To compile:
* gcc -fopenmp main.c -o main -lm
**************************************************/
// Necessary libraries
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
/*
 * Approximates the integral of sin(x) on [min, max] with the composite
 * trapezoidal rule, parallelized with OpenMP.
 *
 * Fixes over the previous version:
 *  - total_result was read before ever being initialized (undefined behavior);
 *  - partial_result was listed in private(), so each thread started from an
 *    indeterminate value instead of 0 (undefined behavior);
 *  - total_result was multiplied by delta inside the critical section, so
 *    contributions merged earlier were scaled more than once.
 */
int main(){
    // Integration limits.
    double min = 0;
    double max = 1;
    // Number of trapezoids.
    int steps = 1000000;
    // Width of each trapezoid.
    double delta = (max - min) / steps;
    // Shared accumulator, seeded with the two endpoint samples, which the
    // trapezoidal rule weights by 1/2.
    double total_result = (sin(min) + sin(max)) / 2.0;

    #pragma omp parallel
    {
        // Per-thread sum of the interior sample points; must start at zero.
        double partial_result = 0.0;
        #pragma omp for
        for (int i = 1; i < steps; i++) {
            partial_result += sin(min + i * delta);
        }
        // Merge this thread's sum into the shared total, one thread at a time.
        // (A reduction(+:total_result) clause would be equivalent.)
        #pragma omp critical
        {
            total_result += partial_result;
        }
    }
    // Scale by the interval width exactly once, after all threads are done.
    total_result *= delta;

    // Expected value: integral of sin on [0,1] = 1 - cos(1) ~= 0.459698.
    printf("Result is: %lf\n", total_result);
    return 0;
}
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
/// This bit is set only for the Stmts that are the structured-block of
/// OpenMP executable directives. Directives that have a structured block
/// are called "non-standalone" directives.
/// I.e. those returned by OMPExecutableDirective::getStructuredBlock().
unsigned IsOMPStructuredBlock : 1;
};
enum { NumStmtBits = 9 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
enum { NumExprBits = NumStmtBits + 9 };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is trail-allocated.
unsigned ResultKind : 2;
/// When ResultKind == RSK_Int64, whether the trail-allocated integer is
/// signed.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64, the BitWidth of the trail-allocated
/// integer. 7 bits because it is the minimal number of bits needed to
/// represent a value from 0 to 64 (the size of the trail-allocated number).
unsigned BitWidth : 7;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types and 0 otherwise.
unsigned FPFeatures : 3;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
unsigned Kind : 2;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 3;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
/// Anonymous union overlaying all the per-subclass bitfield classes above.
/// Every Stmt subclass stores its class-specific flags in exactly one
/// member; each bitfield class begins with anonymous padding so its bits
/// sit after the bits of its base classes. Do not reorder: the members
/// mirror the order in StmtNodes.td.
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
// Matching operator deletes are deliberate no-ops: the storage is owned
// by the ASTContext allocator (see operator new above), so individual
// nodes are never freed.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
/// Dereferencing performs a checked downcast from Stmt* to T*.
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Stmt nodes are arena-allocated and referenced by pointer; they are
// never default-constructed, copied, or moved.
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
/// Construct a statement of the given class, recording the class in the
/// shared bitfields and updating allocation statistics when enabled.
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
StmtBits.IsOMPStructuredBlock = false;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
/// Return the kind of this statement (one entry per concrete subclass).
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
}
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
/// The declaration group this statement wraps (one or more Decls).
DeclGroupRef DG;
/// Source range of the declaration statement, including the semicolon.
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
// Direct iteration over the wrapped declarations (not their initializer
// expressions; use children() for those).
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
/// Build a null statement located at the given semicolon.
/// \param hasLeadingEmptyMacro true when the ";" was preceded by an
/// empty macro expansion (tracked for diagnostics).
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
// The semicolon location is stored in the shared bitfields, not a member.
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
// A null statement begins and ends at its single ";" token.
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
// A null statement has no children: both ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
/// Create a compound statement; the body statements are stored as
/// trailing objects, so allocation goes through this factory.
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
// Mutable iteration over the body statements (trailing storage).
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
void setLastStmt(Stmt *S) {
assert(!body_empty() && "setLastStmt");
body_begin()[size() - 1] = S;
}
// Const iteration over the body statements.
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
/// Build an empty switch case (used by de-serialization).
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// Intrusive singly-linked list of the cases inside one switch.
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Defined out-of-line below, after CaseStmt/DefaultStmt are complete.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which are
// optional. Note that it would be more convenient to put the optional
// trailing objects at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allows ranges in case statements of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
// Offsets into the trailing Stmt* array; the RHS slot (when present)
// shifts the substatement slot by one.
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
// LHS/RHS are stored as Stmt* in the trailing array but are always
// expressions, hence the reinterpret_casts.
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DefaultStmt - Represents the "default:" label of a switch statement.
class DefaultStmt : public SwitchCase {
/// The statement following the "default:" label.
Stmt *SubStmt;
public:
/// Build a default statement.
/// \param DL location of the "default" keyword, \param CL location of the
/// ":", \param substmt the labelled statement.
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
// Out-of-line so CaseStmt and DefaultStmt are complete types here;
// SwitchCase itself stores no end location, so dispatch to the subclass.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getEndLoc();
  if (const auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
// Out-of-line companion to getEndLoc(): forward to whichever concrete
// subclass this SwitchCase actually is.
Stmt *SwitchCase::getSubStmt() {
  if (auto *Case = dyn_cast<CaseStmt>(this))
    return Case->getSubStmt();
  if (auto *Default = dyn_cast<DefaultStmt>(this))
    return Default->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
// No state of its own; inherit the Stmt constructors directly.
using Stmt::Stmt;
public:
/// Strip off label-like wrappers and return the underlying expression
/// statement, if any (defined out of line).
const Expr *getExprStmt() const;
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
/// The label's declaration (links uses of the label to this statement).
LabelDecl *TheDecl;
/// The statement this label is attached to.
Stmt *SubStmt;
public:
/// Build a label statement.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
// The identifier location lives in the shared bitfields.
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public ValueStmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The statement the attributes apply to; the Attr* array is stored as
/// trailing objects.
Stmt *SubStmt;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: ValueStmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
/// Create an attributed statement; allocation must go through this factory
/// because of the trailing attribute array.
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing
// objects at the end but this would change the order of the children.
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact a "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
// Offsets into the trailing Stmt* array; optional slots before the
// condition shift the later offsets.
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
Stmt *Then, SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);
/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }
/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }
/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }
// The condition slot stores a Stmt* that is always an Expr*.
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
return getTrailingObjects<Stmt *>()[thenOffset()];
}
void setThen(Stmt *Then) {
getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}
Stmt *getElse() {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
const Stmt *getElse() const {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
void setElse(Stmt *Else) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}
/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<IfStmt *>(this)->getConditionVariable();
}
/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This if statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }
SourceLocation getElseLoc() const {
return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
void setElseLoc(SourceLocation ElseLoc) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
*getTrailingObjects<SourceLocation>() = ElseLoc;
}
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
bool isObjCAvailabilityCheck() const;
SourceLocation getBeginLoc() const { return getIfLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
if (getElse())
return getElse()->getEndLoc();
return getThen()->getEndLoc();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Condition and body are always allocated; init statement and condition
  // variable contribute one slot each when present.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  // Offsets into the trailing "Stmt *" array; see layout comment above.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build a empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  /// Get/set the condition. Stored as a "Stmt *" but in fact an "Expr *".
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  /// Get/set the switch body (typically a CompoundStmt — not verified here).
  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Get the init statement, or nullptr if no init storage was allocated.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  /// Set the init statement; asserts that init storage exists.
  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  // Head of the singly linked list of case/default statements.
  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }

  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }

  /// Prepend SC to the case list; asserts SC is not already linked into
  /// another switch. Note the list therefore holds cases in reverse order
  /// of insertion.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  // The body may be null (e.g. while still being parsed); fall back to the
  // condition's end location in that case.
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  // Offsets into the trailing "Stmt *" array; see layout comment above.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  // Condition and body are mandatory; the condition variable adds one slot.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);

public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL);

  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);

  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }

  /// Get/set the condition. Stored as a "Stmt *" but in fact an "Expr *".
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  /// Get/set the loop body. Always present.
  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }

  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  // Unlike SwitchStmt::getEndLoc, the body is dereferenced unconditionally
  // here; a null body would be a bug in the caller.
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
  // Sub-statement slots: the body precedes the condition so that children()
  // visits them in source order.
  enum { BODY, COND, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc; // Location of final ')' in do stmt condition.

public:
  /// Build a 'do/while' statement from its body, condition and the
  /// locations of the 'do' keyword, 'while' keyword and closing ')'.
  DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
      : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
    setBody(Body);
    setCond(Cond);
    setDoLoc(DL);
  }

  /// Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}

  // Condition accessors. The slot holds a "Stmt *" but is in fact an
  // "Expr *" (Expr is incomplete at this point, hence reinterpret_cast).
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }

  // Body accessors.
  const Stmt *getBody() const { return SubExprs[BODY]; }
  Stmt *getBody() { return SubExprs[BODY]; }
  void setBody(Stmt *Body) { SubExprs[BODY] = Body; }

  // The 'do' location lives in the shared Stmt bitfields; the 'while' and
  // ')' locations are plain members.
  SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
  void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  // The statement spans from the 'do' keyword to the closing ')' of the
  // condition.
  SourceLocation getBeginLoc() const { return getDoLoc(); }
  SourceLocation getEndLoc() const { return getRParenLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators over the body and condition, in that order.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Sub-statement slots, in children() order.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}

  Stmt *getInit() { return SubExprs[INIT]; }

  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  /// NOTE(review): presumably null when there is no condition variable,
  /// mirroring the other nullable parts — confirm against the constructor.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  // Accessors for the four clauses and the body. The COND/INC slots hold
  // "Stmt *"s that are in fact "Expr *"s; any of init/cond/inc may be null.
  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  // 'for' keyword location is stored in the shared Stmt bitfields.
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getBeginLoc() const { return getForLoc(); }
  // The body is dereferenced unconditionally; unlike the clauses it is
  // expected to be non-null.
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;

public:
  /// Build a direct goto to \p label, given the locations of the 'goto'
  /// keyword and of the label name.
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    setGotoLoc(GL);
  }

  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}

  // Target label of the jump.
  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }

  // The 'goto' keyword location is kept in the shared Stmt bitfields; the
  // label location is a plain member.
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }

  // The statement spans from the 'goto' keyword to the label name.
  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }

  // A direct goto has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  // The target address expression. Stored as a "Stmt *" so that children()
  // can iterate over it; it is in fact an "Expr *".
  Stmt *Target;

public:
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setTarget(target);
    setGotoLoc(gotoLoc);
  }

  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}

  // 'goto' keyword location lives in the shared Stmt bitfields (reused from
  // GotoStmt); the '*' location is a plain member.
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }

  // Target-expression accessors (Expr is incomplete here, hence the casts).
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }

  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }

  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  // Target is dereferenced unconditionally; a null target would be a bug.
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }

  // Iterators over the single target expression.
  child_range children() { return child_range(&Target, &Target + 1); }
  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  /// Build a 'continue' statement at the given keyword location.
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    setContinueLoc(CL);
  }

  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}

  // The keyword location is stored in the shared Stmt bitfields.
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }

  // The statement covers exactly the 'continue' keyword.
  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // A continue statement has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  /// Build a 'break' statement at the given keyword location.
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    setBreakLoc(BL);
  }

  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}

  // The keyword location is stored in the shared Stmt bitfields.
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }

  // The statement covers exactly the 'break' keyword.
  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // A break statement has no sub-statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;

  /// The return expression.
  Stmt *RetExpr;

  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().

  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }

  // Zero or one trailing VarDecl pointer, depending on the flag above.
  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }

  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);

  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);

public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);

  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);

  /// Get the returned expression, or null for a bare "return;". Stored as a
  /// "Stmt *" but in fact an "Expr *".
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }

  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }

  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }

  // 'return' keyword location, stored in the shared Stmt bitfields.
  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }

  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  // Ends at the returned expression when present, else at the keyword.
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }

  // Iterators over the optional return expression.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;

  SourceLocation AsmLoc;

  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;

  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;

  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;

  // Operand expressions, owned by the subclass: outputs first
  // ([0, NumOutputs)), then inputs ([NumOutputs, NumOutputs + NumInputs)).
  Stmt **Exprs = nullptr;

  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}

public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}

  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }

  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }

  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }

  // Base-class locations are invalid; subclasses shadow these with real
  // locations (see e.g. GCCAsmStmt).
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }

  //===--- Asm String Analysis ---===//

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  unsigned getNumOutputs() const { return NumOutputs; }

  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;

  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }

  const Expr *getOutputExpr(unsigned i) const;

  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;

  //===--- Input operands ---===//

  unsigned getNumInputs() const { return NumInputs; }

  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;

  const Expr *getInputExpr(unsigned i) const;

  //===--- Other ---===//

  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }

  // Input expr iterators: the inputs live directly after the outputs in
  // Exprs.
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;

  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }
  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }

  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }
  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }

  // Output expr iterators: the outputs occupy the front of Exprs.
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;

  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }
  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }

  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }
  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }
  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }

  // Children are the output and input expressions (labels, if any, are a
  // subclass concern).
  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation RParenLoc;
  StringLiteral *AsmStr;

  // FIXME: If we wanted to, we could allocate all of these in one big array.
  // Layout of the parallel arrays mirrors Exprs in the AsmStmt base:
  // outputs first, then inputs, then (for Names/Exprs) the asm-goto labels.
  StringLiteral **Constraints = nullptr;
  StringLiteral **Clobbers = nullptr;
  IdentifierInfo **Names = nullptr;
  unsigned NumLabels = 0;

public:
  GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
             bool isvolatile, unsigned numoutputs, unsigned numinputs,
             IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
             StringLiteral *asmstr, unsigned numclobbers,
             StringLiteral **clobbers, unsigned numlabels,
             SourceLocation rparenloc);

  /// Build an empty inline-assembly statement.
  explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  //===--- Asm String Analysis ---===//

  const StringLiteral *getAsmString() const { return AsmStr; }
  StringLiteral *getAsmString() { return AsmStr; }
  void setAsmString(StringLiteral *E) { AsmStr = E; }

  /// AsmStringPiece - this is part of a decomposed asm string specification
  /// (for use with the AnalyzeAsmString function below).  An asm string is
  /// considered to be a concatenation of these parts.
  class AsmStringPiece {
  public:
    enum Kind {
      String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
      Operand  // Operand reference, with optional modifier %c4.
    };

  private:
    Kind MyKind;
    std::string Str;
    unsigned OperandNo;

    // Source range for operand references.
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//

  // Output operands occupy the front of the Names/Constraints/Exprs arrays.
  IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }

  StringRef getOutputName(unsigned i) const {
    if (IdentifierInfo *II = getOutputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getOutputConstraint(unsigned i) const;

  const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
    return Constraints[i];
  }
  StringLiteral *getOutputConstraintLiteral(unsigned i) {
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//

  // Input operands follow the outputs in the parallel arrays.
  IdentifierInfo *getInputIdentifier(unsigned i) const {
    return Names[i + NumOutputs];
  }

  StringRef getInputName(unsigned i) const {
    if (IdentifierInfo *II = getInputIdentifier(i))
      return II->getName();
    return {};
  }

  StringRef getInputConstraint(unsigned i) const;

  const StringLiteral *getInputConstraintLiteral(unsigned i) const {
    return Constraints[i + NumOutputs];
  }
  StringLiteral *getInputConstraintLiteral(unsigned i) {
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Labels ---===//

  // Labels (from "asm goto") follow the inputs, which themselves follow the
  // outputs; hence label i lives at index i + NumOutputs + NumInputs.
  // (The previous code indexed with i + NumInputs only, which collides with
  // the input operands whenever NumOutputs > 0.)

  bool isAsmGoto() const {
    return NumLabels > 0;
  }

  unsigned getNumLabels() const {
    return NumLabels;
  }

  IdentifierInfo *getLabelIdentifier(unsigned i) const {
    return Names[i + NumOutputs + NumInputs];
  }

  AddrLabelExpr *getLabelExpr(unsigned i) const;
  StringRef getLabelName(unsigned i) const;
  using labels_iterator = CastIterator<AddrLabelExpr>;
  using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
  using labels_range = llvm::iterator_range<labels_iterator>;
  using labels_const_range = llvm::iterator_range<const_labels_iterator>;

  labels_iterator begin_labels() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  labels_iterator end_labels() {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_range labels() {
    return labels_range(begin_labels(), end_labels());
  }

  const_labels_iterator begin_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }

  const_labels_iterator end_labels() const {
    return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
  }

  labels_const_range labels() const {
    return labels_const_range(begin_labels(), end_labels());
  }

private:
  void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                      IdentifierInfo **Names,
                                      StringLiteral **Constraints,
                                      Stmt **Exprs,
                                      unsigned NumOutputs,
                                      unsigned NumInputs,
                                      unsigned NumLabels,
                                      StringLiteral **Clobbers,
                                      unsigned NumClobbers);

public:
  //===--- Other ---===//

  /// getNamedOperand - Given a symbolic operand reference like %[foo],
  /// translate this into a numeric value needed to reference the same operand.
  /// This returns -1 if the operand name is invalid.
  int getNamedOperand(StringRef SymbolicName) const;

  StringRef getClobber(unsigned i) const;

  StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
  const StringLiteral *getClobberStringLiteral(unsigned i) const {
    return Clobbers[i];
  }

  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass;
  }
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
  friend class ASTStmtReader;

  SourceLocation LBraceLoc, EndLoc;
  /// The raw asm text (MS-style asm has no GCC constraint-string syntax).
  StringRef AsmStr;

  unsigned NumAsmToks = 0;
  /// The lexed tokens of the asm body.
  Token *AsmToks = nullptr;
  /// Output constraints first, then input constraints.
  StringRef *Constraints = nullptr;
  StringRef *Clobbers = nullptr;

public:
  MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
            SourceLocation lbraceloc, bool issimple, bool isvolatile,
            ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
            ArrayRef<StringRef> constraints,
            ArrayRef<Expr*> exprs, StringRef asmstr,
            ArrayRef<StringRef> clobbers, SourceLocation endloc);

  /// Build an empty MS-style inline-assembly statement.
  explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}

  SourceLocation getLBraceLoc() const { return LBraceLoc; }
  void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  /// True for the braced "__asm { ... }" form (the brace location is valid).
  bool hasBraces() const { return LBraceLoc.isValid(); }

  unsigned getNumAsmToks() { return NumAsmToks; }
  Token *getAsmToks() { return AsmToks; }

  //===--- Asm String Analysis ---===//
  StringRef getAsmString() const { return AsmStr; }

  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;

  //===--- Output operands ---===//
  StringRef getOutputConstraint(unsigned i) const {
    assert(i < NumOutputs);
    return Constraints[i];
  }

  Expr *getOutputExpr(unsigned i);

  const Expr *getOutputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
  }

  //===--- Input operands ---===//
  /// Input constraints are stored after the NumOutputs output constraints.
  StringRef getInputConstraint(unsigned i) const {
    assert(i < NumInputs);
    return Constraints[i + NumOutputs];
  }

  Expr *getInputExpr(unsigned i);
  void setInputExpr(unsigned i, Expr *E);

  const Expr *getInputExpr(unsigned i) const {
    return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
  }

  //===--- Other ---===//
  /// All constraints, outputs followed by inputs.
  ArrayRef<StringRef> getAllConstraints() const {
    return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
  }

  ArrayRef<StringRef> getClobbers() const {
    return llvm::makeArrayRef(Clobbers, NumClobbers);
  }

  /// All operand expressions, outputs followed by inputs.
  ArrayRef<Expr*> getAllExprs() const {
    return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                              NumInputs + NumOutputs);
  }

  StringRef getClobber(unsigned i) const { return getClobbers()[i]; }

private:
  /// Copy the operands into ASTContext-allocated storage.
  void initialize(const ASTContext &C, StringRef AsmString,
                  ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                  ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);

public:
  SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == MSAsmStmtClass;
  }

  child_range children() {
    return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }

  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
  }
};
/// Represents a `__except` handler of a Windows SEH `__try` statement:
/// a filter expression plus the handler's compound statement.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  /// The filter expression and the handler block, indexed by the enum below.
  Stmt *Children[2];

  enum { FILTER_EXPR, BLOCK };

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};
/// Represents a `__finally` block of a Windows SEH `__try` statement.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;
  Stmt *Block;

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  const_child_range children() const {
    return const_child_range(&Block, &Block + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};
/// Represents a Windows SEH `__try` statement (also reused for a plain C++
/// `try` in -fms-extensions mode; see IsCXXTry).
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;
  SourceLocation TryLoc;
  /// Children[TRY] is the guarded compound statement; Children[HANDLER] is
  /// either a SEHExceptStmt or a SEHFinallyStmt (see the accessors below).
  Stmt *Children[2];

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  const_child_range children() const {
    return const_child_range(Children, Children + 2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  // A __leave has no sub-statements; both ends of its range are the keyword.
  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators (empty: no children).
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
///
/// Storage layout: the object is followed in memory by NumCaptures
/// capture-initialization expressions, then the captured statement itself,
/// then the Capture records (see getStoredStmts()/getStoredCaptures()).
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // Kind is packed into the low bits of the VarDecl pointer.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Trailing storage: NumCaptures init expressions, then the captured Stmt.
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // The source range is that of the captured statement itself.
  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();
  const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
mmc.c | /*******************************************************************
*
* M4RI: Linear Algebra over GF(2)
*
* Copyright (C) 2007, 2008 Gregory Bard <bard@fordham.edu>
* Copyright (C) 2008 Martin Albrecht <M.R.Albrecht@rhul.ac.uk>
*
* Distributed under the terms of the GNU General Public License (GPL)
* version 2 or higher.
*
* This code is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full text of the GPL is available at:
*
* http://www.gnu.org/licenses/
*
********************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "mmc.h"
#if __M4RI_ENABLE_MMC
/**
* The actual memory block cache.
*/
mmb_t m4ri_mmc_cache[__M4RI_MMC_NBLOCKS];
#endif // __M4RI_ENABLE_MMC
/**
 * \brief Allocate size bytes.
 *
 * Small requests (size <= __M4RI_MMC_THRESHOLD) are first looked up in the
 * block cache; a cached block of exactly the requested size is handed back
 * and its slot emptied.  On a cache miss, or for large requests, the call
 * falls through to m4ri_mm_malloc().
 *
 * \param size Number of bytes.
 *
 * \return pointer to allocated memory block.
 */
void *m4ri_mmc_malloc(size_t size) {
#if __M4RI_ENABLE_MMC
  void *ret = NULL;
#if __M4RI_HAVE_OPENMP
  /* The cache is shared; serialize lookups with m4ri_mmc_free(). */
#pragma omp critical (mmc)
  {
#endif
  mmb_t *mm = m4ri_mmc_cache;
  if (size <= __M4RI_MMC_THRESHOLD) {
    /* Linear scan for an exact-size match; the cache is tiny. */
    for (int i = 0; i < __M4RI_MMC_NBLOCKS; ++i) {
      if(mm[i].size == size) {
        ret = mm[i].data;
        mm[i].data = NULL;
        mm[i].size = 0;
        break;
      }
    }
  }
#if __M4RI_HAVE_OPENMP
  }
#endif
  /* Fall back to a real allocation outside the critical section. */
  if (ret)
    return ret;
  else
    return m4ri_mm_malloc(size);
#else // __M4RI_ENABLE_MMC
  return m4ri_mm_malloc(size);
#endif // __M4RI_ENABLE_MMC
}
/**
 * \brief Free the data pointed to by condemned of the given size.
 *
 * Small blocks are parked in the cache instead of being freed: the first
 * empty slot takes the block; if the cache is full, the slot at the
 * round-robin index j is evicted (really freed) and reused.  Large blocks
 * go straight to m4ri_mm_free().
 *
 * NOTE(review): this uses `size < __M4RI_MMC_THRESHOLD` while
 * m4ri_mmc_malloc() uses `<=`, so blocks of exactly THRESHOLD bytes are
 * never cached (harmless, but presumably unintended) -- confirm upstream.
 *
 * \param condemned Pointer to memory.
 * \param size Number of bytes.
 */
void m4ri_mmc_free(void *condemned, size_t size) {
#if __M4RI_ENABLE_MMC
#if __M4RI_HAVE_OPENMP
#pragma omp critical (mmc)
  {
#endif
  /* Round-robin eviction cursor; persists across calls. */
  static int j = 0;
  mmb_t *mm = m4ri_mmc_cache;
  if (size < __M4RI_MMC_THRESHOLD) {
    /* Prefer an empty slot ... */
    for(int i = 0; i < __M4RI_MMC_NBLOCKS; ++i) {
      if(mm[i].size == 0) {
        mm[i].size = size;
        mm[i].data = condemned;
        goto done;
      }
    }
    /* ... otherwise evict slot j and take its place. */
    m4ri_mm_free(mm[j].data);
    mm[j].size = size;
    mm[j].data = condemned;
    j = (j+1) % __M4RI_MMC_NBLOCKS;
  } else {
    m4ri_mm_free(condemned);
  }
 done:
  ;
#if __M4RI_HAVE_OPENMP
  }
#endif // __M4RI_HAVE_OPENMP
#else // __M4RI_ENABLE_MMC
  m4ri_mm_free(condemned);
#endif // __M4RI_ENABLE_MMC
}
/**
 * \brief Cleans up memory block cache.
 *
 * Frees every occupied slot and marks it empty.  The cached data pointers
 * are left dangling, but that is safe because size==0 marks the slot unused.
 *
 * This function is called automatically when the shared library is unloaded.
 *
 * \warning Not thread safe.
 */
void m4ri_mmc_cleanup(void) {
#if __M4RI_ENABLE_MMC
#if __M4RI_HAVE_OPENMP
#pragma omp critical (mmc)
  {
#endif
  mmb_t *mm = m4ri_mmc_cache;
  for(int i = 0; i < __M4RI_MMC_NBLOCKS; ++i) {
    if (mm[i].size)
      m4ri_mm_free(mm[i].data);
    mm[i].size = 0;
  }
#if __M4RI_HAVE_OPENMP
  }
#endif // __M4RI_HAVE_OPENMP
#endif // __M4RI_ENABLE_MMC
}
|
for-4.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
extern void bar(int);
/* Exercise a dynamically-scheduled OpenMP worksharing loop; the dg-final
   scans above check that ompexp lowers it to GOMP_loop_dynamic_start/next.
   Do not alter the loop shape: the dump counts depend on it. */
void foo (int n)
{
  int i;
#pragma omp for schedule(dynamic)
  for (i = 0; i < n; ++i)
    bar(i);
}
/* { dg-final { scan-tree-dump-times "GOMP_loop_dynamic_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_dynamic_next" 1 "ompexp" } } */
/* { dg-final { cleanup-tree-dump "ompexp" } } */
|
mysql_netauth_fmt_plug.c | /* Cracker for MySQL network authentication hashes. Hacked together
* during May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mysqlna;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mysqlna);
#else
#include "sha.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"
#define FORMAT_LABEL "mysqlna"
#define FORMAT_NAME "MySQL Network Authentication"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define HEX_LENGTH 40
#define CIPHERTEXT_LENGTH 90
#define BINARY_SIZE 20
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_NONE
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests mysqlna_tests[] = {
{"$mysqlna$2D52396369653E4626293B2F75244D3871507A39*7D63098BEE381A51AA6DF11E307E46BD4F8B6E0C", "openwall"},
{"$mysqlna$615c2b5e79656f7d4931594e5b5d416c7b483365*c3a70da2874db890eb2f0a5e3ea80b2ed17da0d0", "openwall"},
{"$mysqlna$295a687c59275452214b366b39776d3f31757b2e*7343f45c94cccd646a1b29bbfad064a9ee5c0380", "overlord magnum"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
char unsigned scramble[20];
} *cur_salt;
/* One-time format setup: scale keys-per-crypt for OpenMP and allocate the
 * parallel key/result arrays.  Memory comes from mem_calloc_tiny() and is
 * never freed (fmt_default_done). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* Give every thread OMP_SCALE candidate keys per crypt_all() call. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/*
 * Validate a "$mysqlna$<40 hex scramble>*<40 hex hash>" ciphertext.
 *
 * Changes from the original: strchr() replaces strstr() for the
 * single-character search, and the redundant `strlen(p) < HEX_LENGTH`
 * check is dropped (the final `q - p - 1 == HEX_LENGTH` test already
 * rejects any string with fewer than HEX_LENGTH digits after '*').
 *
 * Returns 1 if well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	if (strncmp(ciphertext, "$mysqlna$", 9))
		return 0;
	p = ciphertext + 9;
	q = strchr(p, '*');	/* tag contains no '*', so search after it */
	if (!q)
		return 0;
	if (q - p != HEX_LENGTH)	/* scramble must be exactly 40 digits */
		return 0;
	while (atoi16[ARCH_INDEX(*p)] != 0x7F && p < q)
		p++;
	if (q - p != 0)	/* non-hex character before the '*' */
		return 0;
	/* p == q here, pointing at '*'; validate the hash part after it. */
	q = p + 1;
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	/* Exactly HEX_LENGTH hex digits, then end of string. */
	return !*q && q - p - 1 == HEX_LENGTH;
}
/*
 * Canonicalize a ciphertext by lower-casing it into a static buffer.
 *
 * Bug fix: strncpy(out, src, sizeof(out)) does not NUL-terminate when the
 * source is at least sizeof(out) bytes long, which would make the
 * subsequent strlwr()/return read past the buffer.  Terminate explicitly.
 */
static char* split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	strncpy(out, ciphertext, CIPHERTEXT_LENGTH);
	out[CIPHERTEXT_LENGTH] = 0;	/* guarantee termination */
	strlwr(out);
	return out;
}
/*
 * Decode the 20-byte scramble (the 40 hex digits between "$mysqlna$" and
 * '*') into a static custom_salt.  Assumes valid() has already accepted
 * the ciphertext, so p is a well-formed 40-digit token.  Returns a pointer
 * to static storage; the caller copies SALT_SIZE bytes out of it.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);	/* strtok mutates its input */
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;
	ctcopy += 9;	/* skip over "$mysqlna$" */
	p = strtok(ctcopy, "*");
	for (i = 0; i < 20; i++)
		cs.scramble[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/*
 * Decode the 40 hex digits after the last '*' into the 20-byte binary
 * hash.  Returns a pointer to static, ARCH_WORD-aligned storage.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;	/* force word alignment */
	} buf;
	char *pos = strrchr(ciphertext, '*') + 1;
	int i;

	for (i = 0; i < BINARY_SIZE; i++, pos += 2)
		buf.c[i] = (unsigned char)((atoi16[ARCH_INDEX(pos[0])] << 4) |
		                           atoi16[ARCH_INDEX(pos[1])]);
	return buf.c;
}
/* Partial-hash accessors over the low bits of the first 32-bit word of the
 * computed result, used by the cracker's hash tables of increasing size. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* Install the salt (scramble) that subsequent crypt_all() calls will use. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * For each candidate password compute the value the MySQL client sends:
 *     token XOR stage1,  where
 *     stage1 = SHA1(password),
 *     token  = SHA1(scramble . SHA1(stage1)).
 * The result is stored in crypt_out[index] for the cmp_* functions.
 *
 * Fix: the for-loop used to be emitted only under #ifdef _OPENMP, so a
 * non-OpenMP build silently processed only index 0.  The loop is now
 * unconditional and only the pragma is guarded (the standard JtR pattern).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char stage1_hash[20];
		unsigned char inner_hash[20];
		unsigned char token[20];
		SHA_CTX ctx;
		int i;
		unsigned char *p = (unsigned char*)crypt_out[index];

		/* stage1 = SHA1(password) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		SHA1_Final(stage1_hash, &ctx);

		/* inner = SHA1(stage1) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, stage1_hash, 20);
		SHA1_Final(inner_hash, &ctx);

		/* token = SHA1(scramble . inner) */
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, cur_salt->scramble, 20);
		SHA1_Update(&ctx, inner_hash, 20);
		SHA1_Final(token, &ctx);

		for (i = 0; i < 20; i++)
			p[i] = token[i] ^ stage1_hash[i];
	}
	return count;
}
/*
 * Return 1 if any computed result in [0, count) matches the binary hash.
 *
 * Fix: the loop used to be compiled only under #ifdef _OPENMP, relying on
 * count always being 1 in non-OpenMP builds (MAX_KEYS_PER_CRYPT is 1 and
 * init() scales it only with OpenMP).  Looping unconditionally has the
 * same behavior and is robust against future keys-per-crypt changes.
 */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}
/* Exact 20-byte comparison against a single computed result. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* The full hash is already compared in cmp_one(); nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password, truncated to PLAINTEXT_LENGTH and always
 * NUL-terminated. */
static void mysqlna_set_key(char *key, int index)
{
	size_t len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored candidate password for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with the cracker core: static parameters
 * first, then the method table wired to the functions above. */
struct fmt_main fmt_mysqlna = {
	{	/* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		mysqlna_tests
	}, {	/* methods */
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		mysqlna_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_binop__div_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__div_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_int8)
// A*D function (colscale): GB (_AxD__div_int8)
// D*A function (rowscale): GB (_DxB__div_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__div_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__div_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_int8)
// C=scalar+B GB (_bind1st__div_int8)
// C=scalar+B' GB (_bind1st_tran__div_int8)
// C=A+scalar GB (_bind2nd__div_int8)
// C=A'+scalar GB (_bind2nd_tran__div_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (x, y, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT8 || GxB_NO_DIV_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the worker body is generated
// from the shared template using the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__div_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; worker generated from the
// shared template using the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__div_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  Returns
// GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__div_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  Returns GrB_NO_VALUE
// when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumb__div_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above returns); kept as emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by the diagonal matrix D; worker generated from the
// colscale template.
GrB_Info GB (_AxD__div_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by the diagonal matrix D; worker generated from the
// rowscale template.
GrB_Info GB (_DxB__div_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.  For eWiseUnion the alpha/beta
// scalars replace missing entries of A and B respectively; they are read
// only when is_eWiseUnion is true.
GrB_Info GB (_AaddB__div_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces declared here are released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Element-wise multiply (set intersection) with the DIV (int8) operator;
// all traversal and mask handling is in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__div_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this kernel was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// DIV (int8) kernel.  GB_BINOP_FLIP selects at compile time whether the
// flipxy flag must be honored at run time by swapping the operands.
GrB_Info GB (_AemultB_02__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant.  For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// DIV (int8) kernel; the masked traversal is in GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__div_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// this kernel was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// DIV (int8) kernel for a bitmap-format result; logic is in the template.
GrB_Info GB (_AemultB_bitmap__div_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this kernel was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = div (x, Bx [p]) for every entry present in B, with the scalar x
// bound as the first operand.
GrB_Info GB (_bind1st__div_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
int8_t *Bx = (int8_t *) Bx_input ;
int8_t *Cx = (int8_t *) Cx_output ;
const int8_t x = (*((int8_t *) x_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < bnz ; p++)
{
// only entries present in the bitmap Bb are computed
if (GBB (Bb, p))
{
int8_t bval = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_SIGNED (x, bval, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = div (Ax [p], y) for every entry present in A, with the scalar y
// bound as the second operand.
GrB_Info GB (_bind2nd__div_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
int8_t *Ax = (int8_t *) Ax_input ;
int8_t *Cx = (int8_t *) Cx_output ;
const int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// only entries present in the bitmap Ab are computed
if (GBB (Ab, p))
{
int8_t aval = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_SIGNED (aval, y, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 8) ; \
}
// Transposes A while applying the DIV operator with x bound as the first
// operand; GB_unop_transpose.c expands GB_CAST_OP (above) at each entry.
GrB_Info GB (_bind1st_tran__div_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound first operand, read once before the template runs
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// redefinition kept as generated (restores GB_ATYPE after the template)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 8) ; \
}
// Transposes A while applying the DIV operator with y bound as the second
// operand; GB_unop_transpose.c expands GB_CAST_OP (above) at each entry.
GrB_Info GB (_bind2nd_tran__div_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound second operand, read once before the template runs
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tinyexr.h | /*
Copyright (c) 2014 - 2015, Syoyo Fujita
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the <organization> nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __TINYEXR_H__
#define __TINYEXR_H__
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#ifdef __cplusplus
extern "C" {
#endif
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
//#define TINYEXR_COMPRESSIONTYPE_RLE (1) // not supported yet
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
// One custom EXR header attribute: name/type strings plus raw value bytes.
typedef struct _EXRAttribute {
char *name;
char *type;
int size; // number of bytes in `value`
unsigned char *value; // uint8_t*
} EXRAttribute;
// Decoded single-frame OpenEXR image together with its header fields.
typedef struct _EXRImage {
// Custom attributes (excludes required attributes (e.g. `channels`,
// `compression`, etc))
EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES];
int num_custom_attributes;
int num_channels;
const char **channel_names;
unsigned char **images; // image[channels][pixels]
int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
// each channel
int *requested_pixel_types; // Filled initially by
// ParseEXRHeaderFrom(Memory|File), then users
// can edit it(only valid for HALF pixel type
// channel)
int width;
int height;
float pixel_aspect_ratio;
int compression; // compression type(TINYEXR_COMPRESSIONTYPE_*)
int line_order;
int data_window[4];
int display_window[4];
float screen_window_center[2];
float screen_window_width;
} EXRImage;
// Decoded OpenEXR deep image: per-channel, per-scanline sample arrays.
typedef struct _DeepImage {
int num_channels;
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int width;
int height;
} DeepImage;
// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Return 0 if success
// Returns error string in `err` when there's an error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Parse single-frame OpenEXR header from a file and initialize `EXRImage`
// struct.
// Users then call LoadMultiChannelEXRFromFile to actually load image data into
// `EXRImage`
extern int ParseMultiChannelEXRHeaderFromFile(EXRImage *image,
const char *filename,
const char **err);
// Parse single-frame OpenEXR header from a memory and initialize `EXRImage`
// struct.
// Users then call LoadMultiChannelEXRFromMemory to actually load image data
// into `EXRImage`
extern int ParseMultiChannelEXRHeaderFromMemory(EXRImage *image,
const unsigned char *memory,
const char **err);
// Loads multi-channel, single-frame OpenEXR image from a file.
// Application must setup `ParseMultiChannelEXRHeaderFromFile` before calling
// `LoadMultiChannelEXRFromFile`.
// Application can free EXRImage using `FreeEXRImage`
// Return 0 if success
// Returns error string in `err` when there's an error
extern int LoadMultiChannelEXRFromFile(EXRImage *image, const char *filename,
const char **err);
// Loads multi-channel, single-frame OpenEXR image from a memory.
// Application must setup `EXRImage` with `ParseMultiChannelEXRHeaderFromMemory`
// before calling `LoadMultiChannelEXRFromMemory`.
// Application can free EXRImage using `FreeEXRImage`
// Return 0 if success
// Returns error string in `err` when there's an error
extern int LoadMultiChannelEXRFromMemory(EXRImage *image,
const unsigned char *memory,
const char **err);
// Saves floating point RGBA image as OpenEXR.
// Image is compressed using EXRImage.compression value.
// Return 0 if success
// Returns error string in `err` when there's an error
// extern int SaveEXR(const float *in_rgba, int width, int height,
// const char *filename, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// `compression_type` is one of TINYEXR_COMPRESSIONTYPE_*.
// Returns 0 if success
// Returns error string in `err` when there's an error
extern int SaveMultiChannelEXRToFile(const EXRImage *image,
const char *filename, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Returns the number of bytes written if success,
// or a negative number when failed.
// Returns error string in `err` when there's an error
extern size_t SaveMultiChannelEXRToMemory(const EXRImage *image,
unsigned char **memory,
const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns 0 if success
// Returns error string in `err` when there's an error
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Return 0 if success
// Returns error string in `err` when there's an error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// Initialize of EXRImage struct
extern void InitEXRImage(EXRImage *exrImage);
// Frees internal data of EXRImage struct
// Returns 0 if success.
extern int FreeEXRImage(EXRImage *exrImage);
// For emscripten.
// Parse single-frame OpenEXR header from memory.
// Return 0 if success
extern int ParseEXRHeaderFromMemory(EXRAttribute *customAttributes,
int *numCustomAttributes, int *width,
int *height, const unsigned char *memory);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// `out_rgba` must have enough memory(at least sizeof(float) x 4(RGBA) x width x
// height)
// Return 0 if success
// Returns error string in `err` when there's an error
extern int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory,
const char **err);
#ifdef __cplusplus
}
#endif
#ifdef TINYEXR_IMPLEMENTATION
#include <cstdio>
#include <cstdlib>
#include <cassert>
#include <cstring>
#include <algorithm>
#include <string>
#include <vector>
#include "tinyexr.h"
#ifdef _OPENMP
#include <omp.h>
#endif
namespace {
namespace miniz {
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occured in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repro.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
faiiled
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's where designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
//#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
//#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive
// API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc
// on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
  const unsigned char *next_in; // pointer to next byte to read
  unsigned int avail_in; // number of bytes available at next_in
  mz_ulong total_in; // total number of bytes consumed so far
  unsigned char *next_out; // pointer to next byte to write
  unsigned int avail_out; // number of bytes that can be written to next_out
  mz_ulong total_out; // total number of bytes produced so far
  char *msg; // error msg (unused by miniz)
  struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
  mz_alloc_func
      zalloc; // optional heap allocation function (defaults to malloc)
  mz_free_func zfree; // optional heap free function (defaults to free)
  void *opaque; // user pointer passed through to zalloc/zfree
  int data_type; // data_type (unused by miniz)
  mz_ulong adler; // adler32 of the source or uncompressed data
  mz_ulong reserved; // not used
} mz_stream;
// zlib-style pointer-to-stream typedef.
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflateInit(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,          // size of internal I/O buffers
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,      // max stored filename length
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256   // max stored per-file comment
};
// Detailed information about a single archive entry, filled in by
// mz_zip_reader_file_stat().
typedef struct {
  mz_uint32 m_file_index;       // index of this file within the archive
  mz_uint32 m_central_dir_ofs;  // offset of this entry's central dir record
  mz_uint16 m_version_made_by;  // ZIP "version made by" field
  mz_uint16 m_version_needed;   // ZIP "version needed to extract" field
  mz_uint16 m_bit_flag;         // ZIP general purpose bit flag
  mz_uint16 m_method;           // compression method (e.g. 0=stored, 8=deflated)
#ifndef MINIZ_NO_TIME
  time_t m_time;                // file modification time
#endif
  mz_uint32 m_crc32;            // CRC-32 of the uncompressed data
  mz_uint64 m_comp_size;        // compressed size in bytes
  mz_uint64 m_uncomp_size;      // uncompressed size in bytes
  mz_uint16 m_internal_attr;    // ZIP internal attributes
  mz_uint32 m_external_attr;    // ZIP external attributes
  mz_uint64 m_local_header_ofs; // offset of the entry's local header
  mz_uint32 m_comment_size;     // length of the file comment
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];    // entry filename
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; // entry comment
} mz_zip_archive_file_stat;
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
// Current mode of an mz_zip_archive object.
typedef enum {
  MZ_ZIP_MODE_INVALID = 0,                   // not initialized
  MZ_ZIP_MODE_READING = 1,                   // opened via mz_zip_reader_init*()
  MZ_ZIP_MODE_WRITING = 2,                   // opened via mz_zip_writer_init*()
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 // finalized; only _end() is valid
} mz_zip_mode;
typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;               // total archive size in bytes
  mz_uint64 m_central_directory_file_ofs; // offset of the central directory
  mz_uint m_total_files;                  // number of files in the archive
  mz_zip_mode m_zip_mode;                 // current reading/writing mode
  mz_uint m_file_offset_alignment; // alignment of file data (used by writer)
  mz_alloc_func m_pAlloc;          // heap allocation callback
  mz_free_func m_pFree;            // heap free callback
  mz_realloc_func m_pRealloc;      // heap reallocation callback
  void *m_pAlloc_opaque;           // user pointer passed to the heap callbacks
  mz_file_read_func m_pRead;       // archive read callback
  mz_file_write_func m_pWrite;     // archive write callback
  void *m_pIO_opaque;              // user pointer passed to the I/O callbacks
  mz_zip_internal_state *m_pState; // opaque internal state
} mz_zip_archive;
// Flags accepted by the reader/writer APIs (may be OR'd together).
typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,  // filename compares are case sensitive
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,     // match filename only, ignoring path
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, // data is already compressed
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 // keep central dir order
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
  TINFL_FLAG_PARSE_ZLIB_HEADER = 1,             // input is a zlib stream
  TINFL_FLAG_HAS_MORE_INPUT = 2,                // more input follows this buffer
  TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, // output buf holds whole stream
  TINFL_FLAG_COMPUTE_ADLER32 = 8                // always compute adler-32
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status.
typedef enum {
  TINFL_STATUS_BAD_PARAM = -3,        // invalid parameters
  TINFL_STATUS_ADLER32_MISMATCH = -2, // zlib adler-32 check failed
  TINFL_STATUS_FAILED = -1,           // decompression failed
  TINFL_STATUS_DONE = 0,              // stream fully decompressed
  TINFL_STATUS_NEEDS_MORE_INPUT = 1,  // supply more input and call again
  TINFL_STATUS_HAS_MORE_OUTPUT = 2    // drain the output buffer and call again
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
// Internal/private bits follow.
enum {
  TINFL_MAX_HUFF_TABLES = 3,      // lit/len, distance, and code-length tables
  TINFL_MAX_HUFF_SYMBOLS_0 = 288, // max literal/length symbols (per DEFLATE)
  TINFL_MAX_HUFF_SYMBOLS_1 = 32,  // max distance symbols
  TINFL_MAX_HUFF_SYMBOLS_2 = 19,  // max code-length symbols
  TINFL_FAST_LOOKUP_BITS = 10,    // bits consumed by the fast lookup table
  TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
// One Huffman decoding table: per-symbol code sizes, a fast direct-lookup
// table, and a tree for codes longer than TINFL_FAST_LOOKUP_BITS.
typedef struct {
  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
      m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#ifndef MINIZ_HAS_64BIT_REGISTERS
# define MINIZ_HAS_64BIT_REGISTERS 0
#endif
#ifndef TINFL_USE_64BIT_BITBUF
# if MINIZ_HAS_64BIT_REGISTERS
# define TINFL_USE_64BIT_BITBUF 1
# else
# define TINFL_USE_64BIT_BITBUF 0
# endif
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
// Complete internal state of the low-level decompressor coroutine
// (tinfl_decompress()). Reset with tinfl_init().
struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
      m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf; // bit buffer (32 or 64 bits, see TINFL_BITBUF_SIZE)
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
  TDEFL_HUFFMAN_ONLY = 0,         // no LZ matching at all (Huffman only)
  TDEFL_DEFAULT_MAX_PROBES = 128, // default dictionary probes per search
  TDEFL_MAX_PROBES_MASK = 0xFFF   // low 12 flag bits hold the probe count
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
  TDEFL_WRITE_ZLIB_HEADER = 0x01000,   // emit zlib header + adler-32 trailer
  TDEFL_COMPUTE_ADLER32 = 0x02000,     // always compute adler-32 of the input
  TDEFL_GREEDY_PARSING_FLAG = 0x04000, // faster greedy (vs. lazy) parsing
  TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, // faster init, output may vary
  TDEFL_RLE_MATCHES = 0x10000,         // only match at distance 1 (RLE)
  TDEFL_FILTER_MATCHES = 0x20000,      // discard matches <= 5 chars
  TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, // no optimized Huffman tables
  TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000     // only raw (uncompressed) blocks
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
  TDEFL_MAX_HUFF_TABLES = 3,      // lit/len, distance, code-length tables
  TDEFL_MAX_HUFF_SYMBOLS_0 = 288, // max literal/length symbols (per DEFLATE)
  TDEFL_MAX_HUFF_SYMBOLS_1 = 32,  // max distance symbols
  TDEFL_MAX_HUFF_SYMBOLS_2 = 19,  // max code-length symbols
  TDEFL_LZ_DICT_SIZE = 32768,     // LZ dictionary size
  TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
  TDEFL_MIN_MATCH_LEN = 3,        // shortest emittable LZ match
  TDEFL_MAX_MATCH_LEN = 258       // longest emittable LZ match
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
// LZ code buffer, output buffer, and hash table sizing. The TDEFL_LESS_MEMORY
// variant shrinks the buffers and hash table at some cost in speed/ratio.
#if TDEFL_LESS_MEMORY
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 12,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
  TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
  TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
  TDEFL_MAX_HUFF_SYMBOLS = 288,
  TDEFL_LZ_HASH_BITS = 15,
  TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
  TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
  TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
  TDEFL_STATUS_BAD_PARAM = -2,      // invalid parameters
  TDEFL_STATUS_PUT_BUF_FAILED = -1, // output callback reported failure
  TDEFL_STATUS_OKAY = 0,            // compression in progress; call again
  TDEFL_STATUS_DONE = 1,            // compression completed
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
  TDEFL_NO_FLUSH = 0,   // same value/meaning as MZ_NO_FLUSH
  TDEFL_SYNC_FLUSH = 2, // same value/meaning as MZ_SYNC_FLUSH
  TDEFL_FULL_FLUSH = 3, // same value/meaning as MZ_FULL_FLUSH
  TDEFL_FINISH = 4      // same value/meaning as MZ_FINISH
} tdefl_flush;
// tdefl's compression state structure.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func; // optional output callback
  void *m_pPut_buf_user; // user pointer passed to m_pPut_buf_func
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status; // status returned by the last tdefl call
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  // LZ dictionary, sized to allow matches that run past the dictionary end.
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output will be supplied to the
// specified callback. In this case, the user should call the
// tdefl_compress_buffer() API for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);
// Returns the status code of the most recent compress call on d.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
// Returns the compressor's running adler-32 checksum.
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of the zlib-style macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
// Compile-time sanity checks: each typedef declares an array whose size is -1
// (a compile error) if the corresponding mz_ fixed-width type is mis-sized.
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
#include <string.h>
#include <assert.h>
#define MZ_ASSERT(x) assert(x)
// With MINIZ_NO_MALLOC the heap hooks compile to no-ops so miniz can be built
// for environments without a heap (all allocations then fail with NULL).
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void) x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
// NOTE: MZ_MAX/MZ_MIN evaluate their arguments more than once.
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
// On little-endian targets that tolerate unaligned accesses, read multi-byte
// little-endian values with a single load; otherwise assemble them bytewise.
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  /* Adler-32 (RFC 1950): s1 accumulates bytes, s2 accumulates the running
     s1 values, both reduced mod 65521. The modulo is deferred: 5552 is the
     largest block length for which the 32-bit sums cannot overflow first. */
  mz_uint32 s1 = (mz_uint32)(adler & 0xffff);
  mz_uint32 s2 = (mz_uint32)(adler >> 16);
  size_t block_len = buf_len % 5552;
  if (!ptr)
    return MZ_ADLER32_INIT;
  while (buf_len) {
    mz_uint32 i = 0;
    /* Hand-unrolled by eight; the second loop picks up the leftover bytes. */
    for (; i + 7 < block_len; i += 8, ptr += 8) {
      s1 += ptr[0], s2 += s1;
      s1 += ptr[1], s2 += s1;
      s1 += ptr[2], s2 += s1;
      s1 += ptr[3], s2 += s1;
      s1 += ptr[4], s2 += s1;
      s1 += ptr[5], s2 += s1;
      s1 += ptr[6], s2 += s1;
      s1 += ptr[7], s2 += s1;
    }
    for (; i < block_len; ++i) {
      s1 += *ptr++;
      s2 += s1;
    }
    s1 %= 65521U;
    s2 %= 65521U;
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  /* Nibble-at-a-time CRC-32 using a 16-entry table (Karl Malbrain's compact
     variant): each byte is folded in as two 4-bit table lookups. */
  static const mz_uint32 s_crc32[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
      0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 crcu32 = (mz_uint32)crc;
  if (!ptr)
    return MZ_CRC32_INIT;
  crcu32 = ~crcu32;
  while (buf_len--) {
    mz_uint8 b = *ptr++;
    /* Low nibble first, then high nibble. */
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 ^ b) & 0xF];
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 ^ (b >> 4)) & 0xF];
  }
  return ~crcu32;
}
// Frees a block obtained from miniz's default allocator; NULL is a no-op.
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
// Default zalloc hook: plain malloc of items*size bytes.
// The multiplication is guarded against size_t wrap-around (CERT INT30-C):
// without the check a huge request could wrap and silently allocate a block
// far smaller than the caller expects, leading to a heap overflow.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  if ((size != 0) && (items > ((size_t)-1) / size))
    return NULL; // items * size would overflow
  return MZ_MALLOC(items * size);
}
// Default zfree hook: forwards to MZ_FREE, ignoring the opaque context.
static void def_free_func(void *opaque, void *address) {
  (void)opaque;
  (void)address;
  MZ_FREE(address);
}
// Default zrealloc hook: forwards to MZ_REALLOC.
// Like def_alloc_func(), guards items*size against size_t wrap-around
// (CERT INT30-C) so an overflowing request fails instead of shrinking.
static void *def_realloc_func(void *opaque, void *address, size_t items,
                              size_t size) {
  (void)opaque, (void)address, (void)items, (void)size;
  if ((size != 0) && (items > ((size_t)-1) / size))
    return NULL; // items * size would overflow
  return MZ_REALLOC(address, items * size);
}
// Returns the miniz version string (MZ_VERSION).
const char *mz_version(void) { return MZ_VERSION; }
// Initializes a deflate stream with default settings (zlib wrapper,
// mem_level 9, default strategy); see mz_deflateInit2() for details.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}
// Initializes a deflate stream with zlib-style parameters. Only MZ_DEFLATED
// and the default window size (positive = zlib-wrapped, negative = raw
// deflate) are supported. Returns MZ_OK, MZ_STREAM_ERROR, MZ_PARAM_ERROR or
// MZ_MEM_ERROR.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream)
    return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  // Install the default heap hooks if the caller supplied none.
  if (!pStream->zalloc)
    pStream->zalloc = def_alloc_func;
  if (!pStream->zfree)
    pStream->zfree = def_free_func;
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp)
    return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream); // frees pComp via zfree
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}
// Re-initializes an existing deflate stream for reuse, preserving the
// compression flags it was created with.
int mz_deflateReset(mz_streamp pStream) {
  tdefl_compressor *pComp;
  if ((pStream == NULL) || (pStream->state == NULL) ||
      (pStream->zalloc == NULL) || (pStream->zfree == NULL))
    return MZ_STREAM_ERROR;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pComp = (tdefl_compressor *)pStream->state;
  tdefl_init(pComp, NULL, NULL, pComp->m_flags);
  return MZ_OK;
}
// Compresses pending input from pStream into its output buffer.
// Returns MZ_OK, MZ_STREAM_END (stream finished under MZ_FINISH),
// MZ_BUF_ERROR (no forward progress possible) or MZ_STREAM_ERROR.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out)
    return MZ_BUF_ERROR;
  // Partial flush isn't supported; downgrade it to a sync flush.
  if (flush == MZ_PARTIAL_FLUSH)
    flush = MZ_SYNC_FLUSH;
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
  // Remember the starting totals so we can detect lack of progress below.
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    // Advance the stream bookkeeping by what tdefl actually consumed/produced.
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}
// Releases the compressor state allocated by mz_deflateInit2().
int mz_deflateEnd(mz_streamp pStream) {
  if (pStream == NULL)
    return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// Conservative worst-case compressed size for source_len input bytes.
// (Deliberately over-estimates: a tight bound is hard to compute given the
// way tdefl's blocking works.)
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  mz_ulong expansion_bound = 128 + (source_len * 110) / 100;
  mz_ulong stored_bound =
      128 + source_len + ((source_len / (31 * 1024)) + 1) * 5;
  (void)pStream;
  return MZ_MAX(expansion_bound, stored_bound);
}
// One-shot compression of pSource[0..source_len) into pDest at the given
// compression level. On entry *pDest_len is the output capacity; on success
// it is updated to the compressed size. Returns MZ_OK or an MZ_* error.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  int status;
  mz_stream stream;
  memset(&stream, 0, sizeof(stream));
  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU)
    return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_deflateInit(&stream, level);
  if (status != MZ_OK)
    return status;
  status = mz_deflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    // MZ_OK here means the output buffer was too small for the whole stream.
    mz_deflateEnd(&stream);
    return (status == MZ_OK) ? MZ_BUF_ERROR : status;
  }
  *pDest_len = stream.total_out;
  return mz_deflateEnd(&stream);
}
// One-shot compression at the default level; see mz_compress2().
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}
// Worst-case compressed size for source_len bytes; see mz_deflateBound().
mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}
// Per-stream state for the zlib-style inflate wrappers.
typedef struct {
  tinfl_decompressor m_decomp; // low-level coroutine decompressor
  // m_dict holds decompressed output not yet copied to the caller;
  // m_dict_ofs/m_dict_avail track the pending region within it.
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits; // positive = zlib header expected, negative = raw
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status; // status of the last tinfl_decompress() call
} inflate_state;
// Initializes an inflate stream. Only the default window size is supported:
// a positive window_bits selects a zlib-wrapped stream, a negative one a raw
// (headerless) deflate stream.
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pState;
  if (pStream == NULL)
    return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  // Install default heap hooks when the caller supplied none.
  if (pStream->zalloc == NULL)
    pStream->zalloc = def_alloc_func;
  if (pStream->zfree == NULL)
    pStream->zfree = def_free_func;
  pState = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                            sizeof(inflate_state));
  if (pState == NULL)
    return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pState;
  tinfl_init(&pState->m_decomp);
  pState->m_dict_ofs = 0;
  pState->m_dict_avail = 0;
  pState->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pState->m_first_call = 1;
  pState->m_has_flushed = 0;
  pState->m_window_bits = window_bits;
  return MZ_OK;
}
// Initializes an inflate stream expecting a zlib header; see mz_inflateInit2().
int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
// Decompresses as much input as possible into the stream's output buffer.
// Returns MZ_OK, MZ_STREAM_END, MZ_DATA_ERROR, MZ_BUF_ERROR or
// MZ_STREAM_ERROR. Output is staged through the internal dictionary in
// pState->m_dict except on the first-call MZ_FINISH fast path.
int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;
  if ((!pStream) || (!pStream->state))
    return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH)
    flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0)
    decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;
  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0)
    return MZ_DATA_ERROR;
  if (pState->m_has_flushed && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);
  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers are
    // large enough to hold the entire compressed/decompressed file.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // If flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH)
    decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  // Drain any output still buffered in the dictionary from a previous call.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    // Decompress into the circular dictionary, then copy to the caller.
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
                            // uncompressed data left in the output dictionary -
                            // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
                           // without supplying more input or by setting flush
                           // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large enough to hold the remaining
      // uncompressed data when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
               (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}
// Releases the inflate state allocated by mz_inflateInit2().
int mz_inflateEnd(mz_streamp pStream) {
  if (pStream == NULL)
    return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// One-shot decompression of pSource[0..source_len) into pDest. On entry
// *pDest_len is the output capacity; on success it becomes the decompressed
// size. Returns MZ_OK or an MZ_* error.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  mz_stream stream;
  int status;
  memset(&stream, 0, sizeof(stream));
  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU)
    return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_inflateInit(&stream);
  if (status != MZ_OK)
    return status;
  status = mz_inflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_inflateEnd(&stream);
    // BUF_ERROR with all input consumed means the stream was truncated.
    return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR
                                                            : status;
  }
  *pDest_len = stream.total_out;
  return mz_inflateEnd(&stream);
}
const char *mz_error(int err) {
static struct {
int m_err;
const char *m_pDesc;
} s_error_descs[] = {{MZ_OK, ""},
{MZ_STREAM_END, "stream end"},
{MZ_NEED_DICT, "need dictionary"},
{MZ_ERRNO, "file error"},
{MZ_STREAM_ERROR, "stream error"},
{MZ_DATA_ERROR, "data error"},
{MZ_MEM_ERROR, "out of memory"},
{MZ_BUF_ERROR, "buf error"},
{MZ_VERSION_ERROR, "version error"},
{MZ_PARAM_ERROR, "parameter error"}};
mz_uint i;
for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
if (s_error_descs[i].m_err == err)
return s_error_descs[i].m_pDesc;
return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
// The decompressor is structured as a coroutine built from one big switch:
// TINFL_CR_RETURN records a state index and jumps to common_exit, and the
// matching case label resumes execution at that exact point on re-entry.
#define TINFL_CR_BEGIN \
  switch (r->m_state) { \
  case 0:
#define TINFL_CR_RETURN(state_index, result) \
  do { \
    status = result; \
    r->m_state = state_index; \
    goto common_exit; \
  case state_index: \
    ; \
  } \
  MZ_MACRO_END
// Permanently parks the coroutine in a terminal state (used for hard
// failures and for TINFL_STATUS_DONE).
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
  do { \
    for (;;) { \
      TINFL_CR_RETURN(state_index, result); \
    } \
  } \
  MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
#define TINFL_GET_BYTE(state_index, c) \
  do { \
    if (pIn_buf_cur >= pIn_buf_end) { \
      for (;;) { \
        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
          if (pIn_buf_cur < pIn_buf_end) { \
            c = *pIn_buf_cur++; \
            break; \
          } \
        } else { \
          c = 0; \
          break; \
        } \
      } \
    } else \
      c = *pIn_buf_cur++; \
  } \
  MZ_MACRO_END
// Appends bytes to the bit buffer until it holds at least n bits.
#define TINFL_NEED_BITS(state_index, n) \
  do { \
    mz_uint c; \
    TINFL_GET_BYTE(state_index, c); \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
    num_bits += 8; \
  } while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n) \
  do { \
    if (num_bits < (mz_uint)(n)) { \
      TINFL_NEED_BITS(state_index, n); \
    } \
    bit_buf >>= (n); \
    num_bits -= (n); \
  } \
  MZ_MACRO_END
// Extracts the low n bits of the bit buffer into b, refilling first if needed.
#define TINFL_GET_BITS(state_index, b, n) \
  do { \
    if (num_bits < (mz_uint)(n)) { \
      TINFL_NEED_BITS(state_index, n); \
    } \
    b = bit_buf & ((1 << (n)) - 1); \
    bit_buf >>= (n); \
    num_bits -= (n); \
  } \
  MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
  do { \
    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
    if (temp >= 0) { \
      code_len = temp >> 9; \
      if ((code_len) && (num_bits >= code_len)) \
        break; \
    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
      code_len = TINFL_FAST_LOOKUP_BITS; \
      do { \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
      } while ((temp < 0) && (num_bits >= (code_len + 1))); \
      if (temp >= 0) \
        break; \
    } \
    TINFL_GET_BYTE(state_index, c); \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
    num_bits += 8; \
  } while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
  do { \
    int temp; \
    mz_uint code_len, c; \
    if (num_bits < 15) { \
      if ((pIn_buf_end - pIn_buf_cur) < 2) { \
        TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
      } else { \
        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
        pIn_buf_cur += 2; \
        num_bits += 16; \
      } \
    } \
    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
        0) \
      code_len = temp >> 9, temp &= 511; \
    else { \
      code_len = TINFL_FAST_LOOKUP_BITS; \
      do { \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
      } while (temp < 0); \
    } \
    sym = temp; \
    bit_buf >>= code_len; \
    num_bits -= code_len; \
  } \
  MZ_MACRO_END
// Low-level streaming inflate, written as a coroutine via the TINFL_CR_*
// macros so it can suspend whenever it needs more input bytes or output
// space and resume exactly where it left off. On entry *pIn_buf_size and
// *pOut_buf_size are the available input/output sizes; on return they are
// updated to the amounts actually consumed/produced. Unless
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF is set, the output buffer is
// treated as a power-of-2-sized circular dictionary. Returns a tinfl_status
// (values < 0 are failures).
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags) {
  // RFC 1951 base values / extra-bit counts for length and distance codes.
  static const int s_length_base[31] = {
      3,  4,  5,  6,  7,  8,  9,  10,  11,  13,  15,  17,  19,  23,  27,  31,
      35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0,   0};
  static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
                                         1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
                                         4, 4, 5, 5, 5, 5, 0, 0, 0};
  static const int s_dist_base[32] = {
      1,    2,    3,    4,    5,    7,     9,     13,    17,  25,   33,
      49,   65,   97,   129,  193,  257,   385,   513,   769, 1025, 1537,
      2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0,   0};
  static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
                                       4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
                                       9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
  // Order in which code-length code sizes are stored in a dynamic block.
  static const mz_uint8 s_length_dezigzag[19] = {
      16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
  static const int s_min_table_sizes[3] = {257, 1, 4};
  tinfl_status status = TINFL_STATUS_FAILED;
  mz_uint32 num_bits, dist, counter, num_extra;
  tinfl_bit_buf_t bit_buf;
  const mz_uint8 *pIn_buf_cur = pIn_buf_next,
                 *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
  mz_uint8 *pOut_buf_cur = pOut_buf_next,
           *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
  size_t out_buf_size_mask =
             (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
                 ? (size_t)-1
                 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
         dist_from_out_buf_start;
  // Ensure the output buffer's size is a power of 2, unless the output buffer
  // is large enough to hold the entire output file (in which case it doesn't
  // matter).
  if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
      (pOut_buf_next < pOut_buf_start)) {
    *pIn_buf_size = *pOut_buf_size = 0;
    return TINFL_STATUS_BAD_PARAM;
  }
  // Restore the coroutine state saved at the previous suspension point.
  num_bits = r->m_num_bits;
  bit_buf = r->m_bit_buf;
  dist = r->m_dist;
  counter = r->m_counter;
  num_extra = r->m_num_extra;
  dist_from_out_buf_start = r->m_dist_from_out_buf_start;
  TINFL_CR_BEGIN
  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
  r->m_z_adler32 = r->m_check_adler32 = 1;
  // Parse and validate the 2-byte zlib header, if requested.
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    TINFL_GET_BYTE(1, r->m_zhdr0);
    TINFL_GET_BYTE(2, r->m_zhdr1);
    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
               (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
      counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
                  ((out_buf_size_mask + 1) <
                   (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
    if (counter) {
      TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
    }
  }
  // Main loop: one iteration per deflate block; m_final's low bit marks the
  // last block.
  do {
    TINFL_GET_BITS(3, r->m_final, 3);
    r->m_type = r->m_final >> 1;
    if (r->m_type == 0) {
      // Stored (uncompressed) block: skip to a byte boundary, read the
      // LEN/NLEN header, then copy LEN raw bytes to the output.
      TINFL_SKIP_BITS(5, num_bits & 7);
      for (counter = 0; counter < 4; ++counter) {
        if (num_bits)
          TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
        else
          TINFL_GET_BYTE(7, r->m_raw_header[counter]);
      }
      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
          (mz_uint)(0xFFFF ^
                    (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
        TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
      }
      // First drain any bytes still sitting in the bit buffer.
      while ((counter) && (num_bits)) {
        TINFL_GET_BITS(51, dist, 8);
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        *pOut_buf_cur++ = (mz_uint8)dist;
        counter--;
      }
      // Then bulk-copy directly from the input buffer.
      while (counter) {
        size_t n;
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        while (pIn_buf_cur >= pIn_buf_end) {
          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
          } else {
            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
          }
        }
        n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
                          (size_t)(pIn_buf_end - pIn_buf_cur)),
                   counter);
        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
        pIn_buf_cur += n;
        pOut_buf_cur += n;
        counter -= (mz_uint)n;
      }
    } else if (r->m_type == 3) {
      // Block type 3 is reserved/invalid.
      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
    } else {
      if (r->m_type == 1) {
        // Static Huffman block: fixed code lengths per RFC 1951 3.2.6.
        mz_uint8 *p = r->m_tables[0].m_code_size;
        mz_uint i;
        r->m_table_sizes[0] = 288;
        r->m_table_sizes[1] = 32;
        TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
        for (i = 0; i <= 143; ++i)
          *p++ = 8;
        for (; i <= 255; ++i)
          *p++ = 9;
        for (; i <= 279; ++i)
          *p++ = 7;
        for (; i <= 287; ++i)
          *p++ = 8;
      } else {
        // Dynamic Huffman block: read the table sizes and the code-length
        // code sizes (stored in the dezigzag order above).
        for (counter = 0; counter < 3; counter++) {
          TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
          r->m_table_sizes[counter] += s_min_table_sizes[counter];
        }
        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
        for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
          mz_uint s;
          TINFL_GET_BITS(14, s, 3);
          r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
        }
        r->m_table_sizes[2] = 19;
      }
      // Build the fast lookup table and overflow binary tree for each
      // Huffman table (code-length table first for dynamic blocks).
      for (; (int)r->m_type >= 0; r->m_type--) {
        int tree_next, tree_cur;
        tinfl_huff_table *pTable;
        mz_uint i, j, used_syms, total, sym_index, next_code[17],
            total_syms[16];
        pTable = &r->m_tables[r->m_type];
        MZ_CLEAR_OBJ(total_syms);
        MZ_CLEAR_OBJ(pTable->m_look_up);
        MZ_CLEAR_OBJ(pTable->m_tree);
        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
          total_syms[pTable->m_code_size[i]]++;
        // Canonical Huffman: assign consecutive codes per code length.
        used_syms = 0, total = 0;
        next_code[0] = next_code[1] = 0;
        for (i = 1; i <= 15; ++i) {
          used_syms += total_syms[i];
          next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
        }
        if ((65536 != total) && (used_syms > 1)) {
          // Over/under-subscribed code (a single-symbol table is allowed).
          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
        }
        for (tree_next = -1, sym_index = 0;
             sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
          mz_uint rev_code = 0, l, cur_code,
                  code_size = pTable->m_code_size[sym_index];
          if (!code_size)
            continue;
          cur_code = next_code[code_size]++;
          // Huffman codes are stored LSB-first in the bit stream.
          for (l = code_size; l > 0; l--, cur_code >>= 1)
            rev_code = (rev_code << 1) | (cur_code & 1);
          if (code_size <= TINFL_FAST_LOOKUP_BITS) {
            // Short codes: replicate into every matching fast-table slot.
            mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
            while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
              pTable->m_look_up[rev_code] = k;
              rev_code += (1 << code_size);
            }
            continue;
          }
          // Long codes: chain through the binary tree (negative indices).
          if (0 ==
              (tree_cur = pTable->m_look_up[rev_code &
                                            (TINFL_FAST_LOOKUP_SIZE - 1)])) {
            pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
                (mz_int16)tree_next;
            tree_cur = tree_next;
            tree_next -= 2;
          }
          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
            tree_cur -= ((rev_code >>= 1) & 1);
            if (!pTable->m_tree[-tree_cur - 1]) {
              pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
              tree_cur = tree_next;
              tree_next -= 2;
            } else
              tree_cur = pTable->m_tree[-tree_cur - 1];
          }
          tree_cur -= ((rev_code >>= 1) & 1);
          pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
        }
        if (r->m_type == 2) {
          // Decode the literal/length and distance code lengths using the
          // just-built code-length table (symbols 16-18 are repeats).
          for (counter = 0;
               counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
            mz_uint s;
            TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
            if (dist < 16) {
              r->m_len_codes[counter++] = (mz_uint8)dist;
              continue;
            }
            if ((dist == 16) && (!counter)) {
              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
            }
            num_extra = "\02\03\07"[dist - 16];
            TINFL_GET_BITS(18, s, num_extra);
            s += "\03\03\013"[dist - 16];
            TINFL_MEMSET(r->m_len_codes + counter,
                         (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
            counter += s;
          }
          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
          }
          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
                       r->m_table_sizes[0]);
          TINFL_MEMCPY(r->m_tables[1].m_code_size,
                       r->m_len_codes + r->m_table_sizes[0],
                       r->m_table_sizes[1]);
        }
      }
      // Decode the block's symbols until the end-of-block code (256).
      for (;;) {
        mz_uint8 *pSrc;
        for (;;) {
          if (((pIn_buf_end - pIn_buf_cur) < 4) ||
              ((pOut_buf_end - pOut_buf_cur) < 2)) {
            // Slow path near buffer boundaries: one symbol at a time.
            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
            if (counter >= 256)
              break;
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ = (mz_uint8)counter;
          } else {
            // Fast path: refill the bit buffer in bulk and decode up to two
            // literals per iteration.
            int sym2;
            mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
            if (num_bits < 30) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 4;
              num_bits += 32;
            }
#else
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            counter = sym2;
            bit_buf >>= code_len;
            num_bits -= code_len;
            if (counter & 256)
              break;
#if !TINFL_USE_64BIT_BITBUF
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            bit_buf >>= code_len;
            num_bits -= code_len;
            pOut_buf_cur[0] = (mz_uint8)counter;
            if (sym2 & 256) {
              // Second symbol is a length/end code; fall out with it.
              pOut_buf_cur++;
              counter = sym2;
              break;
            }
            pOut_buf_cur[1] = (mz_uint8)sym2;
            pOut_buf_cur += 2;
          }
        }
        if ((counter &= 511) == 256)
          break; // end-of-block
        // Length/distance pair: read extra bits, then copy the match.
        num_extra = s_length_extra[counter - 257];
        counter = s_length_base[counter - 257];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(25, extra_bits, num_extra);
          counter += extra_bits;
        }
        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
        num_extra = s_dist_extra[dist];
        dist = s_dist_base[dist];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(27, extra_bits, num_extra);
          dist += extra_bits;
        }
        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
        if ((dist > dist_from_out_buf_start) &&
            (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
          // Distance reaches before the start of the output - corrupt stream.
          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
        }
        pSrc = pOut_buf_start +
               ((dist_from_out_buf_start - dist) & out_buf_size_mask);
        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
          // Match would run past the output buffer: copy byte-by-byte,
          // suspending whenever the output fills up.
          while (counter--) {
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ =
                pOut_buf_start[(dist_from_out_buf_start++ - dist) &
                               out_buf_size_mask];
          }
          continue;
        }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
        else if ((counter >= 9) && (counter <= dist)) {
          // Non-overlapping match: copy 8 bytes at a time.
          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
          do {
            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
            pOut_buf_cur += 8;
          } while ((pSrc += 8) < pSrc_end);
          if ((counter &= 7) < 3) {
            if (counter) {
              pOut_buf_cur[0] = pSrc[0];
              if (counter > 1)
                pOut_buf_cur[1] = pSrc[1];
              pOut_buf_cur += counter;
            }
            continue;
          }
        }
#endif
        // General (possibly overlapping) match copy, 3 bytes per iteration.
        do {
          pOut_buf_cur[0] = pSrc[0];
          pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur[2] = pSrc[2];
          pOut_buf_cur += 3;
          pSrc += 3;
        } while ((int)(counter -= 3) > 2);
        if ((int)counter > 0) {
          pOut_buf_cur[0] = pSrc[0];
          if ((int)counter > 1)
            pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur += counter;
        }
      }
    }
  } while (!(r->m_final & 1));
  // After the final block, read the big-endian adler-32 trailer if present.
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    TINFL_SKIP_BITS(32, num_bits & 7);
    for (counter = 0; counter < 4; ++counter) {
      mz_uint s;
      if (num_bits)
        TINFL_GET_BITS(41, s, 8);
      else
        TINFL_GET_BYTE(42, s);
      r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
    }
  }
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
  TINFL_CR_FINISH
common_exit:
  // Persist the coroutine state for the next call.
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  // Incrementally adler-32 the bytes produced this call (same blocked
  // algorithm as mz_adler32) and compare against the stream trailer on DONE.
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i)
        s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}
// Higher level helper functions.
// Decompresses the src_buf_len-byte deflate stream at pSrc_buf into a
// heap-allocated buffer that is grown geometrically as output is produced.
// On success returns the buffer (caller owns it; free with MZ_FREE) and
// stores its size in *pOut_len. On any failure returns NULL with
// *pOut_len == 0.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    // The entire input is already present, so drop the "more input" hint;
    // the output buffer is linear, so use the non-wrapping fast path.
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
        &dst_buf_size, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    // NEEDS_MORE_INPUT is fatal here: there is no further input to supply.
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE)
      break;
    // More output is pending: double the buffer (minimum 128 bytes) and
    // resume decompression where it left off.
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128)
      new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags) {
tinfl_decompressor decomp;
tinfl_status status;
tinfl_init(&decomp);
status =
tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
(mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
(flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
: out_buf_len;
}
// Decompresses the pIn_buf/*pIn_buf_size stream through a TINFL_LZ_DICT_SIZE
// ring dictionary, handing each produced chunk to
// pPut_buf_func(pBuf, len, pPut_buf_user). Returns 1 on success, 0 on
// decode failure or if the callback aborts, and TINFL_STATUS_FAILED when
// the dictionary cannot be allocated. *pIn_buf_size is updated to the
// number of input bytes consumed.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int result = 0;
  tinfl_decompressor decomp;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  size_t in_buf_ofs = 0, dict_ofs = 0;
  if (!pDict)
    return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
           dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    // The whole input is present, and the output ring wraps, so clear both
    // the "more input" and "non-wrapping output" flags.
    tinfl_status status =
        tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
                         &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
                         (flags &
                          ~(TINFL_FLAG_HAS_MORE_INPUT |
                            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_buf_ofs += in_buf_size;
    // Deliver whatever was produced this round; a zero return from the
    // callback aborts the whole operation (result stays 0).
    if ((dst_buf_size) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    // Advance the ring-buffer write position; TINFL_LZ_DICT_SIZE is a power
    // of two, so masking wraps it.
    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_buf_ofs;
  return result;
}
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
// Maps the 8-bit match-length value stored in the LZ code buffer to its
// deflate length symbol (257-285); indexed by match_len in
// tdefl_compress_lz_codes().
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
    268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
    272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
    274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
    276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
    280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    285};
// Number of extra bits sent after the length symbol, for the same index as
// s_tdefl_len_sym.
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};
// Distance symbol for match distances < 512, indexed by the distance
// (masked with 511 on the fast path).
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,  8,  8,  8,
    8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};
// Extra-bit counts matching s_tdefl_small_dist_sym.
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
// Distance symbol for match distances >= 512, indexed by distance >> 8.
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
    24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};
// Extra-bit counts matching s_tdefl_large_dist_sym.
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
// m_key holds a symbol's frequency during sorting (and is later overwritten
// with its Huffman code length); m_sym_index is the symbol's original
// alphabet index.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;
// Radix-sorts the num_syms tdefl_sym_freq records in pSyms0 by their 16-bit
// m_key, using pSyms1 as scratch. Up to two counting-sort passes (one per
// key byte); the high-byte pass is skipped when every key fits in 8 bits.
// Returns whichever of the two buffers ends up holding the sorted result.
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
  tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
  MZ_CLEAR_OBJ(hist);
  // Histogram both key bytes in a single scan.
  for (i = 0; i < num_syms; i++) {
    mz_uint freq = pSyms0[i].m_key;
    hist[freq & 0xFF]++;
    hist[256 + ((freq >> 8) & 0xFF)]++;
  }
  // If every key's high byte is zero, one pass suffices.
  while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
    total_passes--;
  for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) {
    const mz_uint32 *pHist = &hist[pass << 8];
    mz_uint offsets[256], cur_ofs = 0;
    // Prefix-sum the histogram into starting offsets for each byte value.
    for (i = 0; i < 256; i++) {
      offsets[i] = cur_ofs;
      cur_ofs += pHist[i];
    }
    // Stable scatter by the current key byte.
    for (i = 0; i < num_syms; i++)
      pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] =
          pCur_syms[i];
    // Ping-pong the source/destination buffers for the next pass.
    {
      tdefl_sym_freq *t = pCur_syms;
      pCur_syms = pNew_syms;
      pNew_syms = t;
    }
  }
  return pCur_syms;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
// In-place Moffat-Katajainen algorithm: given A[0..n-1] sorted by ascending
// frequency (m_key), overwrites each m_key with the optimal (unrestricted)
// Huffman code length for that symbol. The three phases below reuse the
// same array for weights, parent indices, and finally depths.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  // Phase 1: build the Huffman tree in place; internal-node weights and
  // parent indices share the m_key fields.
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    // First child of node `next`: cheaper of next internal node vs. next leaf.
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;  // record parent index
    } else
      A[next].m_key = A[leaf++].m_key;
    // Second child, same choice.
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  // Phase 2: convert parent indices into node depths.
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  // Phase 3: walk the depths and hand out code lengths to the leaves.
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 }; // sizes histograms/next_code arrays below
// Clamps a Huffman code-length histogram so no code exceeds max_code_size
// bits. pNum_codes[l] holds the number of symbols with code length l.
// Over-long buckets are folded into the longest allowed one, then codes are
// promoted one level at a time until the Kraft-McMillan sum is exactly 1
// (i.e. the code is complete again).
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  mz_uint32 kraft_total;
  int len;
  if (code_list_len <= 1)
    return;  // 0 or 1 codes can never exceed the limit
  // Fold every over-long bucket into the max_code_size bucket.
  for (len = max_code_size + 1; len <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE;
       len++)
    pNum_codes[max_code_size] += pNum_codes[len];
  // Kraft sum scaled by 2^max_code_size: a complete code sums to exactly
  // 1UL << max_code_size.
  kraft_total = 0;
  for (len = max_code_size; len > 0; len--)
    kraft_total += (((mz_uint32)pNum_codes[len]) << (max_code_size - len));
  // While the code is over-subscribed, shorten one deepest code and split a
  // shallower code into two codes one bit longer.
  while (kraft_total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (len = max_code_size - 1; len > 0; len--) {
      if (pNum_codes[len]) {
        pNum_codes[len]--;
        pNum_codes[len + 1] += 2;
        break;
      }
    }
    kraft_total--;
  }
}
// Builds Huffman code sizes for table table_num from the symbol frequency
// counts (unless static_table, in which case the preset sizes in
// d->m_huff_code_sizes[table_num] are taken as-is), limits them to
// code_size_limit bits, and derives the canonical codes. Codes are stored
// bit-reversed because deflate emits them LSB-first.
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Fixed table: just histogram the preset code sizes.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Collect only the symbols that actually occurred.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    // Sort by frequency, compute optimal lengths, then clamp them.
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++)
      num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // pSyms is sorted ascending by frequency, so walking j downward gives
    // the longest codes to the least frequent symbols.
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Canonical code assignment (RFC 1951): the first code of each length
  // follows from the counts of all shorter lengths.
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0)
      continue;
    code = next_code[code_size]++;
    // Bit-reverse the code: deflate bitstreams are LSB-first.
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}
// Appends the low l bits of b to the compressor's bit buffer, writing out
// complete bytes as they form. Bytes are silently dropped once the output
// buffer is full; callers detect overflow afterwards by comparing
// m_pOutput_buf against m_pOutput_buf_end.
#define TDEFL_PUT_BITS(b, l)                                                   \
  do {                                                                         \
    mz_uint bits = b;                                                          \
    mz_uint len = l;                                                           \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                                     \
    d->m_bit_buffer |= (bits << d->m_bits_in);                                 \
    d->m_bits_in += len;                                                       \
    while (d->m_bits_in >= 8) {                                                \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)                             \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);                     \
      d->m_bit_buffer >>= 8;                                                   \
      d->m_bits_in -= 8;                                                       \
    }                                                                          \
  }                                                                            \
  MZ_MACRO_END
// Flushes a pending run of rle_repeat_count copies of prev_code_size into
// packed_code_sizes[]: runs shorter than 3 are emitted literally, longer
// runs use meta-symbol 16 ("repeat previous", run length 3-6 carried in the
// following byte). The code-length table's histogram is updated as well.
#define TDEFL_RLE_PREV_CODE_SIZE()                                             \
  {                                                                            \
    if (rle_repeat_count) {                                                    \
      if (rle_repeat_count < 3) {                                              \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                      \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);            \
        while (rle_repeat_count--)                                             \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;         \
      } else {                                                                 \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1);      \
        packed_code_sizes[num_packed_code_sizes++] = 16;                       \
        packed_code_sizes[num_packed_code_sizes++] =                           \
            (mz_uint8)(rle_repeat_count - 3);                                  \
      }                                                                        \
      rle_repeat_count = 0;                                                    \
    }                                                                          \
  }
// Flushes a pending run of rle_z_count zero code sizes: runs shorter than 3
// are emitted literally, 3-10 use meta-symbol 17, and 11-138 use
// meta-symbol 18 (run length carried in the following byte). The
// code-length table's histogram is updated as well.
#define TDEFL_RLE_ZERO_CODE_SIZE()                                             \
  {                                                                            \
    if (rle_z_count) {                                                         \
      if (rle_z_count < 3) {                                                   \
        d->m_huff_count[2][0] =                                                \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);                  \
        while (rle_z_count--)                                                  \
          packed_code_sizes[num_packed_code_sizes++] = 0;                      \
      } else if (rle_z_count <= 10) {                                          \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);      \
        packed_code_sizes[num_packed_code_sizes++] = 17;                       \
        packed_code_sizes[num_packed_code_sizes++] =                           \
            (mz_uint8)(rle_z_count - 3);                                       \
      } else {                                                                 \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);      \
        packed_code_sizes[num_packed_code_sizes++] = 18;                       \
        packed_code_sizes[num_packed_code_sizes++] =                           \
            (mz_uint8)(rle_z_count - 11);                                      \
      }                                                                        \
      rle_z_count = 0;                                                         \
    }                                                                          \
  }
// Fixed order in which the code-length-code lengths are written to a
// dynamic block header (RFC 1951, section 3.2.7).
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
// Emits the header of a dynamic-Huffman deflate block: builds optimized
// literal/length and distance tables, RLE-compresses their concatenated
// code lengths with the 16/17/18 meta-symbols, builds the code-length
// table, and writes HLIT/HDIST/HCLEN plus all three tables (RFC 1951,
// section 3.2.7).
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol always occurs exactly once.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing zero-length codes (at least 257 lit/len and 1 distance
  // code are always sent).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1])
      break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1])
      break;
  // Concatenate both tables' code sizes for RLE packing.
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  // RLE-encode the code sizes: zero runs become symbol 17/18, repeated
  // non-zero sizes become symbol 16.
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) { // symbol 18's maximum run length
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) { // symbol 16's maximum run length
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run is still pending.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  // Build the code-length (meta) table, then write the block header.
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  TDEFL_PUT_BITS(2, 2); // block type 10 = dynamic Huffman
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  // Code-length-code lengths go out in the fixed swizzled order, trailing
  // zeros omitted (minimum of 4 sent).
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Emit the packed code sizes; symbols 16/17/18 carry 2/3/7 extra bits.
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}
// Emits the header of a static (fixed-Huffman) deflate block and loads the
// fixed literal/length and distance code sizes mandated by RFC 1951.
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  // Fixed literal/length code lengths: symbols 0-143 use 8 bits,
  // 144-255 use 9, 256-279 use 7, and 280-287 use 8.
  memset(p, 8, 144);
  memset(p + 144, 9, 256 - 144);
  memset(p + 256, 7, 280 - 256);
  memset(p + 280, 8, 288 - 280);
  // All 32 distance codes use 5 bits.
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  TDEFL_PUT_BITS(1, 2); // block type 01 = static Huffman
}
// mz_bitmasks[n] has the n lowest bits set (0 <= n <= 16).
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
// Emits the buffered LZ codes through the current Huffman tables -- 64-bit
// fast path: bits accumulate in a local mz_uint64 and are flushed with one
// unaligned 8-byte store per iteration. Each LZ record is either a literal
// byte or a 3-byte match (length byte + 16-bit little-endian distance); a
// flags byte every 8 records says which. Returns MZ_FALSE if the output
// buffer overflowed.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;
// Unchecked local accumulate; whole bytes are flushed at the bottom of the
// loop with a single 64-bit store.
#define TDEFL_PUT_BITS_FAST(b, l)                                              \
  {                                                                            \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);                               \
    bits_in += (l);                                                            \
  }
  // The 0x100 sentinel bit marks when the current flags byte is exhausted.
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: length code + extra bits, then distance code + extra bits.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal path, unrolled to emit up to three literals per iteration
      // so the bit buffer still fits in 64 bits before the flush below.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end)
      return MZ_FALSE;
    // Flush whole bytes of the bit buffer with one unaligned 64-bit store.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  // Hand any leftover bits back to the bounds-checked TDEFL_PUT_BITS path,
  // then emit the end-of-block symbol (256).
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Emits the buffered LZ codes through the current Huffman tables -- portable
// path using the byte-at-a-time TDEFL_PUT_BITS macro. Each LZ record is
// either a literal byte or a 3-byte match (length byte + 16-bit
// little-endian distance); a flags byte every 8 records says which.
// Returns MZ_FALSE if the output buffer overflowed.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  // The 0x100 sentinel bit marks when the current flags byte is exhausted.
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: length code + extra bits, then distance code + extra bits.
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      // Small distances index the 512-entry table directly; larger ones use
      // the >>8 table.
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      // Literal byte.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  // End-of-block symbol.
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
// Writes one complete deflate block: emits the static or dynamic header and
// Huffman tables, then the buffered LZ codes. Returns MZ_FALSE if the
// output buffer overflowed.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  void (*start_block)(tdefl_compressor *) =
      static_block ? tdefl_start_static_block : tdefl_start_dynamic_block;
  start_block(d);
  return tdefl_compress_lz_codes(d);
}
// Finishes the current deflate block: emits it as a dynamic, static, or raw
// (stored) block -- whichever is possible and smallest -- handles the zlib
// header/trailer and flush padding, resets the per-block state, and pushes
// the produced bytes to the user's callback or output buffer. Returns the
// number of output bytes not yet delivered (m_output_flush_remaining), or a
// negative tdefl_status if the callback fails.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n, use_raw_block =
             ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
             (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  // Compress straight into the caller's buffer when it is large enough,
  // otherwise go through the internal output buffer.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  // Normalize the trailing partially-filled LZ flags byte.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  // Two-byte zlib header (0x78 0x01) before the very first block.
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); // BFINAL bit
  // Remember the output position so the compressed block can be rolled back
  // if a stored block would be smaller.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // Stored block: type 00, pad to a byte boundary, then LEN and ~LEN.
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    // Copy the uncompressed bytes straight out of the dictionary.
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      // Final block: byte-align, then append the big-endian adler32 for
      // zlib streams.
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Full flush: append an empty stored block so the decompressor can
      // resynchronize on a byte boundary.
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  // Reset the per-block state for the next block.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  // Deliver the output: via the callback, or by copying from the internal
  // buffer into the caller's buffer (tracking any undelivered remainder).
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      // We compressed directly into the caller's buffer.
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Searches the hash chain for the longest match at lookahead_pos -- fast
// path using unaligned 16-bit word compares. On entry *pMatch_len is the
// length to beat; *pMatch_dist/*pMatch_len are only updated when a better
// match is found. The search gives up after a probe budget that depends on
// the compression level (d->m_max_probes).
static MZ_FORCEINLINE void
tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
                 mz_uint max_match_len, mz_uint *pMatch_dist,
                 mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01 = the two bytes a candidate must match at the end of the current
  // best match; s01 = the first two bytes of the lookahead.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len)
    return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0)
        return;
// Follow one hash-chain link; return from the whole search when the chain
// ends or the distance exceeds max_dist; accept a candidate whose tail word
// matches c01.
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    // A 16-bit dist of 0 means the distance wrapped; treat as end of chain.
    if (!dist)
      break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01)
      continue;
    // Compare 8 bytes (four 16-bit words) per iteration, probe_len counting
    // down from 32 iterations.
    p = s;
    probe_len = 32;
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Ran the full comparison budget without a mismatch: take the maximum
      // allowed match length and stop searching.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      // New best match: update the tail word that candidates must match.
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Searches the hash chain for the longest match at lookahead_pos -- portable
// byte-at-a-time path. On entry *pMatch_len is the length to beat;
// *pMatch_dist/*pMatch_len are only updated when a better match is found.
// The search gives up after a probe budget that depends on the compression
// level (d->m_max_probes).
static MZ_FORCEINLINE void
tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
                 mz_uint max_match_len, mz_uint *pMatch_dist,
                 mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  // c0/c1: the two bytes a candidate must match at the end of the current
  // best match -- a cheap reject test before the full compare.
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len)
    return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0)
        return;
// Follow one hash-chain link; return from the whole search when the chain
// ends or the distance exceeds max_dist; accept a candidate whose tail
// bytes match c0/c1.
#define TDEFL_PROBE \
  next_probe_pos = d->m_next[probe_pos]; \
  if ((!next_probe_pos) || \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return; \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
  if ((d->m_dict[probe_pos + match_len] == c0) && \
      (d->m_dict[probe_pos + match_len - 1] == c1)) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    // A 16-bit dist of 0 means the distance wrapped; treat as end of chain.
    if (!dist)
      break;
    // Full byte-wise comparison against the lookahead.
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++)
        break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len)
        return;
      // New best match: update the tail bytes candidates must match.
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
/* Fast-path compressor: greedy, single-probe matching keyed on a 3-byte
 * (trigram) level-1 hash.  Works on cached copies of the compressor state,
 * writing them back only around block flushes.  Returns MZ_FALSE when a
 * flush consumed the caller's output buffer mid-stream. */
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    /* Copy new input into the circular dictionary, mirroring the first
     * TDEFL_MAX_MATCH_LEN-1 bytes past its end so match comparisons can run
     * past the wrap point without masking. */
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    /* Unless flushing, wait until a full 4KB lookahead is buffered. */
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      /* Hash the next 3 bytes into the level-1 hash table. */
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        /* Trigram hit: extend the match comparing four 16-bit words (8 bytes)
         * per iteration; probe_len reaching 0 means >= 256 bytes matched. */
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          /* Match too short (or minimum length at a long distance) to be
           * worth coding - emit a literal instead. */
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          /* 3-byte LZ code: biased length, then little-endian distance. */
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      } else {
        /* No trigram match - plain literal. */
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      /* LZ code buffer nearly full: write back cached state, flush a block,
       * then reload whatever the flush updated. */
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    /* Fewer than 4 bytes of lookahead remain: emit them as literals. */
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  /* Persist the cached state back into the compressor. */
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
/* Append one literal byte to the LZ code buffer: shift a 0 (literal) bit
 * into the current flags byte, start a fresh flags byte every 8 codes, and
 * bump the literal's Huffman frequency count. */
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  mz_uint8 *pOut = d->m_pLZ_code_buf;
  *pOut++ = lit;
  d->m_total_lz_bytes += 1;
  /* Flag bits are consumed LSB-first, so new bits shift in from the top. */
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    /* Current flags byte is full: reserve the next output byte for flags. */
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = pOut++;
  }
  d->m_pLZ_code_buf = pOut;
  d->m_huff_count[0][lit] += 1;
}
/* Append one LZ match code (length, distance) to the code buffer: a 3-byte
 * record (biased length, little-endian distance-1), a 1 (match) flag bit,
 * and updates to the length/distance Huffman frequency tables. */
static MZ_FORCEINLINE void
tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist) {
  mz_uint8 *pOut = d->m_pLZ_code_buf;
  mz_uint32 dist_sym;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  match_dist -= 1;
  pOut[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  pOut[1] = (mz_uint8)(match_dist & 0xFF);
  pOut[2] = (mz_uint8)(match_dist >> 8);
  pOut += 3;
  /* Shift a 1 (match) flag bit into the current flags byte. */
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = pOut++;
  }
  d->m_pLZ_code_buf = pOut;
  /* Short distances use the small-distance symbol table, long ones the
   * large-distance table indexed by the high distance byte. */
  dist_sym = (match_dist < 512) ? s_tdefl_small_dist_sym[match_dist & 511]
                                : s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][dist_sym]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
/* General-purpose compression loop: maintains the sliding dictionary and
 * hash chains, finds matches via tdefl_find_match() (or run-length matches
 * in RLE mode), and applies greedy or one-byte-lazy parsing.  Returns
 * MZ_FALSE when a block flush consumed the caller's output buffer. */
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      /* Seed the rolling hash with the two bytes before the insert point. */
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        /* Mirror the first MAX_MATCH_LEN-1 dictionary bytes past the end so
         * match comparisons may run past the wrap point without masking. */
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      /* Too little buffered data for the incremental rolling hash - insert
       * bytes one at a time, hashing from scratch once 3 bytes exist. */
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
      break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      /* RLE mode: only consider a run of the previous byte (distance 1). */
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c)
            break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    /* Reject matches not worth coding: minimum-length matches at long
     * distances, matches whose source starts at dictionary offset 0
     * (cur_pos == cur_match_dist), or short matches in filter mode. */
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      /* A match was deferred at the previous position: keep whichever of the
       * two is longer. */
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      /* Long matches (>= 128) are always taken immediately. */
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      /* Lazy parse: defer the match one byte to see if the next position
       * yields a longer one. */
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}
/* Copy as much pending compressed output as fits into the caller's buffer
 * and report how much input was consumed.  Returns TDEFL_STATUS_DONE once
 * compression has finished and no flushed bytes remain, else ..._OKAY. */
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size)
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  if (d->m_pOut_buf_size) {
    size_t avail = *d->m_pOut_buf_size - d->m_out_buf_ofs;
    size_t n = MZ_MIN(avail, d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  if (d->m_finished && !d->m_output_flush_remaining)
    return TDEFL_STATUS_DONE;
  return TDEFL_STATUS_OKAY;
}
/* Main streaming compression entry point.  Consumes up to *pIn_buf_size
 * bytes from pIn_buf and writes up to *pOut_buf_size bytes to pOut_buf (or
 * streams through the put-buffer callback chosen at init time).  On return
 * both size fields are updated to the amounts actually consumed/produced.
 * 'flush' controls block flushing and stream termination. */
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  /* Exactly one output mechanism must be in use: either the put-buffer
   * callback (set at init) or a caller-supplied output buffer - not both,
   * not neither.  Also reject calls after an error or premature re-use. */
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  /* Drain output still pending from a previous call before compressing. */
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  /* Fast path only supports single-probe greedy parsing without filter,
   * RLE-only, or raw-block modes. */
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d))
      return d->m_prev_return_status;
  } else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d))
      return d->m_prev_return_status;
  }
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  /* All input consumed and nothing pending: emit the final block.  A full
   * flush also resets the dictionary so the next block is independent. */
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0)
      return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
/* Convenience wrapper around tdefl_compress() for callback-driven output.
 * Requires the compressor to have been initialized with a put-buffer
 * callback, since no output buffer is supplied here. */
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  size_t remaining = in_buf_size;
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &remaining, NULL, NULL, flush);
}
/* Initialize (or reinitialize) a compressor.  'flags' packs the max probe
 * count in its low 12 bits plus the TDEFL_* option bits.  pPut_buf_func may
 * be NULL when the caller will supply output buffers to tdefl_compress().
 * Always returns TDEFL_STATUS_OKAY. */
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  mz_uint probes = (mz_uint)(flags & 0xFFF);
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  /* Probe budgets: [0] when the current best match is short, [1] when it is
   * already >= 32 bytes long. */
  d->m_max_probes[0] = 1 + (probes + 2) / 3;
  d->m_max_probes[1] = 1 + ((probes >> 2) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
    MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  /* Byte 0 of the LZ code buffer is reserved for the first flags byte. */
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = TDEFL_NO_FLUSH;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
/* Return the status code recorded by the most recent tdefl_compress() call. */
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}
/* Return the running adler-32 of the source data compressed so far. */
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }
/* Compress pBuf/buf_len in one shot, streaming compressed bytes through
 * pPut_buf_func.  Heap-allocates a temporary compressor, so it can fail on
 * OOM.  Returns MZ_TRUE on success. */
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool ok = MZ_FALSE;
  if ((!pPut_buf_func) || ((buf_len) && (!pBuf)))
    return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp)
    return MZ_FALSE;
  if (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
      TDEFL_STATUS_OKAY)
    ok = (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
          TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return ok;
}
/* Growable (or fixed-size) memory buffer used as the sink for the in-memory
 * compression helpers below. */
typedef struct {
  size_t m_size, m_capacity; // bytes used / bytes allocated in m_pBuf
  mz_uint8 *m_pBuf;          // output storage (heap-grown or caller-supplied)
  mz_bool m_expandable;      // MZ_TRUE: m_pBuf may be grown via MZ_REALLOC
} tdefl_output_buffer;
/* tdefl put-buffer callback appending 'len' bytes to a tdefl_output_buffer,
 * doubling its capacity (starting at 128) as needed.  Fails when the buffer
 * is full but not expandable, or when reallocation fails. */
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t needed = p->m_size + len;
  if (needed > p->m_capacity) {
    mz_uint8 *pGrown;
    size_t cap = p->m_capacity;
    if (!p->m_expandable)
      return MZ_FALSE;
    do
      cap = MZ_MAX(128U, cap << 1U);
    while (needed > cap);
    pGrown = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, cap);
    if (!pGrown)
      return MZ_FALSE;
    p->m_pBuf = pGrown;
    p->m_capacity = cap;
  }
  memcpy(p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = needed;
  return MZ_TRUE;
}
/* Compress pSrc_buf/src_buf_len into a heap-allocated buffer.  On success
 * returns the buffer (caller frees with MZ_FREE) and stores its size in
 * *pOut_len; returns NULL on any failure. */
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return NULL; /* was "return MZ_FALSE" - an int constant in a void* function */
  *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) {
    /* The putter may have grown m_pBuf before the failure - don't leak it.
     * MZ_FREE(NULL) is a no-op when nothing was allocated. */
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}
/* Compress pSrc_buf into the fixed caller buffer pOut_buf/out_buf_len.
 * Returns the number of compressed bytes written, or 0 on failure
 * (including when the output buffer is too small). */
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf)
    return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  /* m_expandable stays MZ_FALSE, so the putter fails instead of trying to
   * reallocate the caller's buffer. */
  return tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                      tdefl_output_buffer_putter, &out_buf,
                                      flags)
             ? out_buf.m_size
             : 0;
}
#ifndef MINIZ_NO_ZLIB_APIS
static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32,
                                               128, 256, 512, 768, 1500};
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput falls off a cliff
// on some files).
/* Translate zlib-style (level, window_bits, strategy) parameters into a
 * tdefl flags word. */
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  /* A negative level selects the default probe count; note the greedy test
   * below deliberately uses the caller's raw 'level' value. */
  int probe_idx = (level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL;
  mz_uint comp_flags = s_tdefl_num_probes[probe_idx];
  if (level <= 3)
    comp_flags |= TDEFL_GREEDY_PARSING_FLAG;
  if (window_bits > 0)
    comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level)
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  else if (strategy == MZ_FILTERED)
    comp_flags |= TDEFL_FILTER_MATCHES;
  else if (strategy == MZ_HUFFMAN_ONLY)
    comp_flags &= ~TDEFL_MAX_PROBES_MASK;
  else if (strategy == MZ_FIXED)
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  else if (strategy == MZ_RLE)
    comp_flags |= TDEFL_RLE_MATCHES;
  return comp_flags;
}
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to 'int',
// possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
/* Compress a raw w x h image (num_chans bytes per pixel; see chans[] below
 * for the PNG color-type mapping) into a complete PNG file in a heap
 * buffer.  Returns the buffer (caller frees) and its size in *pLen_out, or
 * NULL on failure.  'level' is the compression level (0-10); 'flip' writes
 * the rows bottom-up. */
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip) {
  // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
  // defined.
  static const mz_uint s_tdefl_png_num_probes[11] = {
      0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500};
  tdefl_compressor *pComp =
      (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  tdefl_output_buffer out_buf;
  int i, bpl = w * num_chans, y, z; // bpl = bytes per scanline
  mz_uint32 c;
  *pLen_out = 0;
  if (!pComp)
    return NULL;
  MZ_CLEAR_OBJ(out_buf);
  out_buf.m_expandable = MZ_TRUE;
  out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
  if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
    MZ_FREE(pComp);
    return NULL;
  }
  // write dummy header
  /* 41 placeholder bytes (PNG signature + IHDR chunk + IDAT length/type),
   * overwritten with real values after compression. */
  for (z = 41; z; --z)
    tdefl_output_buffer_putter(&z, 1, &out_buf);
  // compress image data
  tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf,
             s_tdefl_png_num_probes[MZ_MIN(10, level)] |
                 TDEFL_WRITE_ZLIB_HEADER);
  for (y = 0; y < h; ++y) {
    /* z is 0 after the loop above; its first byte doubles as the per-scanline
     * PNG filter-type byte (0 = no filter). */
    tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
    tdefl_compress_buffer(pComp,
                          (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41; /* size of the compressed IDAT payload */
  {
    /* PNG color type indexed by channel count: grey, -, grey+alpha, RGB, RGBA */
    static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06};
    mz_uint8 pnghdr[41] = {
        0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d,
        0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0,
        (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0,
        0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16),
        (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41,
        0x54};
    /* CRC-32 over the IHDR chunk type + data (17 bytes starting at offset 12),
     * stored big-endian at offset 29. */
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  /* IDAT CRC covers the 4-byte chunk type preceding the data plus the data. */
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}
/* Convenience wrapper: compression level 6, no vertical flip. */
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out) {
  // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we
  // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's
  // were #defined out)
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, 6, MZ_FALSE);
}
// ------------------- .ZIP archive reading
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
#if defined(_MSC_VER) //|| defined(__MINGW64__)
/* fopen() wrapper for MSVC, which deprecates plain fopen(); returns NULL on
 * failure, matching standard fopen() semantics. */
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
/* freopen() wrapper for MSVC's freopen_s; returns NULL on failure, matching
 * standard freopen() semantics. */
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream))
    return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes (signatures are the familiar
  // little-endian "PK.." markers)
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, // "PK\x05\x06"
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,        // "PK\x01\x02"
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,          // "PK\x03\x04"
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets (byte offsets within the record)
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets (byte offsets within the record)
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets (byte offsets within the record)
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
/* Simple growable array of fixed-size elements, allocated through the
 * archive's pluggable allocator callbacks. */
typedef struct {
  void *m_p;                 // element storage
  size_t m_size, m_capacity; // elements in use / elements allocated
  mz_uint m_element_size;    // size of one element, in bytes
} mz_zip_array;
/* Internal (heap-allocated) state attached to an mz_zip_archive. */
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;         // raw central directory record bytes
  mz_zip_array m_central_dir_offsets; // per-entry offsets into m_central_dir
  // NOTE(review): presumably entry indices ordered for filename lookup -
  // confirm against the code that populates it.
  mz_zip_array m_sorted_central_dir_offsets;
  MZ_FILE *m_pFile;      // backing stdio file, when file-based
  void *m_pMem;          // backing memory block, when memory-based
  size_t m_mem_size;     // bytes used in m_pMem
  size_t m_mem_capacity; // bytes allocated for m_pMem
};
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
(array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
((element_type *)((array_ptr)->m_p))[index]
/* Free an array's storage via the archive's allocator and reset it to the
 * empty state. */
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(mz_zip_array));
}
/* Grow pArray's storage to hold at least min_new_capacity elements.  When
 * 'growing' is set the capacity is doubled (geometric growth) so repeated
 * pushes are amortized O(1); otherwise the exact capacity is allocated.
 * Returns MZ_FALSE on allocation failure or size_t overflow - capacities
 * here can derive from untrusted archive metadata. */
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pNew_p;
  size_t new_capacity = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity)
    return MZ_TRUE;
  /* Reject requests whose total byte size would overflow size_t. */
  if (min_new_capacity > (((size_t)-1) / pArray->m_element_size))
    return MZ_FALSE;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity) {
      if (new_capacity > (((size_t)-1) / 2)) {
        /* Doubling would wrap; fall back to the exact requested size. */
        new_capacity = min_new_capacity;
        break;
      }
      new_capacity *= 2;
    }
    /* Geometric growth may overshoot into overflow territory even though the
     * exact request fits - clamp back to the exact size in that case. */
    if (new_capacity > (((size_t)-1) / pArray->m_element_size))
      new_capacity = min_new_capacity;
  }
  if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                                         pArray->m_element_size, new_capacity)))
    return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}
/* Ensure capacity for at least new_capacity elements; m_size is untouched. */
static MZ_FORCEINLINE mz_bool
mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray,
                     size_t new_capacity, mz_uint growing) {
  if (new_capacity <= pArray->m_capacity)
    return MZ_TRUE;
  return mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing);
}
/* Set the array's logical size, growing storage first when needed.  New
 * elements are left uninitialized. */
static MZ_FORCEINLINE mz_bool
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size,
                    mz_uint growing) {
  if ((new_size > pArray->m_capacity) &&
      (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)))
    return MZ_FALSE;
  pArray->m_size = new_size;
  return MZ_TRUE;
}
/* Reserve space for n more elements beyond the current size (geometric
 * growth); does not change m_size. */
static MZ_FORCEINLINE mz_bool
mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) {
  return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}
/* Append n elements (copied from pElements) to the end of the array. */
static MZ_FORCEINLINE mz_bool
mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray,
                       const void *pElements, size_t n) {
  size_t old_size = pArray->m_size;
  mz_uint8 *pDst;
  if (!mz_zip_array_resize(pZip, pArray, old_size + n, MZ_TRUE))
    return MZ_FALSE;
  pDst = (mz_uint8 *)pArray->m_p + old_size * pArray->m_element_size;
  memcpy(pDst, pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
/* Convert MS-DOS packed time/date fields (as stored in ZIP headers) into a
 * time_t interpreted in the local time zone.  DOS times carry 2-second
 * resolution; mktime() normalizes any out-of-range fields. */
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm;
  memset(&tm, 0, sizeof(tm));
  tm.tm_isdst = -1; /* let mktime() decide whether DST applies */
  tm.tm_mday = dos_date & 31;
  tm.tm_mon = ((dos_date >> 5) & 15) - 1;        /* tm months are 0-based */
  tm.tm_year = ((dos_date >> 9) & 127) + 80;     /* DOS epoch 1980; 1980-1900 == 80 */
  tm.tm_sec = (dos_time << 1) & 62;              /* stored in 2-second units */
  tm.tm_min = (dos_time >> 5) & 63;
  tm.tm_hour = (dos_time >> 11) & 31;
  return mktime(&tm);
}
// Convert a time_t into MS-DOS packed time and date words (local time).
// NOTE(review): the non-MSVC path uses localtime(), which returns a pointer
// to shared static storage - presumably single-threaded use is assumed here;
// confirm before calling concurrently.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    // Conversion failed: report the DOS epoch (all-zero words) rather than
    // leaving the outputs uninitialized.
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
#endif
  // Time word: hour in bits 11-15, minute in 5-10, seconds/2 in 0-4.
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  // Date word: years since 1980 in bits 9-15, month (1-12) in 5-8, day in 0-4.
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
// Query the filesystem for pFilename's modification time and convert it to
// DOS date/time words. With MINIZ_NO_TIME defined, both outputs are simply
// zeroed and the call always succeeds.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0)
    return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Stamp pFilename with the given access and modification times via utime().
// Returns nonzero (MZ_TRUE) on success.
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf timestamps;
  timestamps.actime = access_time;
  timestamps.modtime = modified_time;
  return utime(pFilename, &timestamps) == 0;
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
// Shared setup for every reader-mode initializer: validates the handle,
// installs the default heap callbacks for any the caller left unset, and
// allocates a zeroed internal state with the three central-directory arrays
// configured for their element sizes.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  mz_zip_internal_state *pState;
  (void)flags;
  if (!pZip)
    return MZ_FALSE;
  if ((pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  // Fall back to the built-in allocator callbacks when none were supplied.
  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  pState = (mz_zip_internal_state *)pZip->m_pAlloc(
      pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state));
  if (!pState)
    return MZ_FALSE;
  memset(pState, 0, sizeof(mz_zip_internal_state));
  pZip->m_pState = pState;
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir, sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// Case-insensitive "less than" comparison between two central-directory
// entries' filenames, identified by their byte offsets stored in
// pCentral_dir_offsets. This is the ordering predicate for the heapsort that
// builds the sorted lookup index.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  // Resolve each index to the start of its central directory header.
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR =
      &MZ_ZIP_ARRAY_ELEMENT(
          pCentral_dir_array, mz_uint8,
          MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // The filename immediately follows the fixed-size header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  // Walk both names until the first case-folded mismatch.
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // Equal prefixes: the shorter name sorts first; otherwise compare the
  // mismatching case-folded characters.
  return (pL == pE) ? (l_len < r_len) : (l < r);
}
// Exchange two mz_uint32 lvalues in place; the do/while wrapper (closed by
// MZ_MACRO_END) makes the macro behave as a single statement. Note that both
// arguments are evaluated more than once, so avoid side-effecting expressions.
#define MZ_SWAP_UINT32(a, b)                                                   \
  do {                                                                         \
    mz_uint32 t = a;                                                           \
    a = b;                                                                     \
    b = t;                                                                     \
  }                                                                            \
  MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void
mz_zip_reader_sort_central_dir_offsets_by_filename(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices =
      &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32,
                            0);
  const int size = pZip->m_total_files;
  // Phase 1 (heapify): sift down every internal node, starting from the last
  // parent, so pIndices forms a max-heap keyed by case-folded filename.
  int start = (size - 2) >> 1, end;
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size)
        break;
      // Advance to the larger of the two children, if a right child exists.
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }
  // Phase 2 (extraction): repeatedly move the heap maximum to the end of the
  // array, then re-sift the new root over the shrinking heap prefix.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end)
        break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}
// Locate and parse the archive's end-of-central-directory (EOCD) record,
// then load and validate the entire central directory, building a per-file
// offset index and (unless disabled via flags) a filename-sorted index.
// Returns MZ_FALSE if the archive is truncated, internally inconsistent, or
// uses unsupported features (multi-disk spans other than disk 1, zip64).
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    // Scan the current window backwards for the EOCD signature.
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG)
        break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up at the start of the file, or once we've scanned further back
    // than the EOCD can possibly be (64K max comment + fixed record size).
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Step back one window, overlapping by 3 bytes so a signature that
    // straddles two windows is still found.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // The "entries on this disk" and "total entries" counts must agree since
  // spanned archives aren't supported.
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;
  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;
  // The directory must be large enough for the claimed number of fixed-size
  // headers, and must lie entirely within the archive.
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
    return MZ_FALSE;
  pZip->m_central_directory_file_ofs = cdir_ofs;
  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;
    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }
    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;
    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      // Record this header's byte offset within the directory blob.
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // 0xFFFFFFFF sizes are zip64 markers, which this reader rejects; a
      // stored (method 0) entry must have equal compressed/uncompressed sizes.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1))
        return MZ_FALSE;
      // The local header plus the compressed payload must fit in the file.
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
          n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }
  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
  return MZ_TRUE;
}
// Initialize pZip for reading through the caller-supplied m_pRead callback;
// `size` is the total archive size in bytes. On failure any partially built
// state is torn down.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead))
    return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_archive_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags))
    return MZ_TRUE;
  // Central directory was unreadable or invalid: release the reader state.
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
// m_pRead callback for memory-backed archives: copy up to n bytes starting
// at file_ofs out of the in-memory archive image into pBuf, returning the
// number of bytes actually copied.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t s;
  // Bail out before forming pMem + file_ofs: pointer arithmetic past the end
  // of the buffer is undefined behavior even when nothing is dereferenced
  // (the old code computed that pointer for a zero-length memcpy).
  if (file_ofs >= pZip->m_archive_size)
    return 0;
  s = (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
  return s;
}
// Initialize pZip to read a zip archive already resident in memory. The
// caller retains ownership of pMem, which must stay valid for the reader's
// lifetime.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_archive_size = size;
  pZip->m_pState->m_mem_size = size;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  if (mz_zip_reader_read_central_dir(pZip, flags))
    return MZ_TRUE;
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#ifndef MINIZ_NO_STDIO
// m_pRead callback for file-backed archives: seek (only when necessary) to
// file_ofs and read up to n bytes into pBuf.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  // Offsets that don't fit in a signed 64-bit value cannot be seeked to.
  if ((mz_int64)file_ofs < 0)
    return 0;
  // Only reposition the stream when the current position differs.
  if (cur_ofs != (mz_int64)file_ofs) {
    if (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))
      return 0;
  }
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Open pFilename and initialize pZip to read it as a zip archive. Once the
// handle has been stored in the reader state, mz_zip_reader_end() owns it.
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile)
    return MZ_FALSE;
  // Determine the archive size by seeking to the end of the file.
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile; // reader state now owns the handle
  pZip->m_archive_size = file_size;
  if (mz_zip_reader_read_central_dir(pZip, flags))
    return MZ_TRUE;
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Number of entries in the archive's central directory (0 for a NULL handle).
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  if (!pZip)
    return 0;
  return pZip->m_total_files;
}
// Return a pointer to file_index's central directory header, or NULL if the
// handle/mode/index is invalid.
static MZ_FORCEINLINE const mz_uint8 *
mz_zip_reader_get_cdh(mz_zip_archive *pZip, mz_uint file_index) {
  mz_uint32 cdh_ofs;
  if ((!pZip) || (!pZip->m_pState))
    return NULL;
  if ((pZip->m_zip_mode != MZ_ZIP_MODE_READING) ||
      (file_index >= pZip->m_total_files))
    return NULL;
  cdh_ofs = MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets,
                                 mz_uint32, file_index);
  return &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8,
                               cdh_ofs);
}
// True if the entry's general-purpose bit flag marks it as encrypted.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  const mz_uint8 *pCdh = mz_zip_reader_get_cdh(pZip, file_index);
  if (!pCdh)
    return MZ_FALSE;
  // Bit 0 of the general-purpose flag word is the encryption bit.
  return (MZ_READ_LE16(pCdh + MZ_ZIP_CDH_BIT_FLAG_OFS) & 1);
}
// True if the entry looks like a directory: either its stored name ends in
// '/', or the DOS directory attribute bit is set in its external attributes.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint name_len;
  const mz_uint8 *pCdh = mz_zip_reader_get_cdh(pZip, file_index);
  if (!pCdh)
    return MZ_FALSE;
  // A trailing '/' in the stored filename marks a directory entry.
  name_len = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if ((name_len) &&
      (pCdh[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + name_len - 1] == '/'))
    return MZ_TRUE;
  // Otherwise fall back on the DOS directory flag in the low 16 bits of the
  // external attributes; the creating OS id in "version made by" is ignored.
  // (The internal-attribute check that used to live here was a bug.)
  return (MZ_READ_LE32(pCdh + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS) & 0x10)
             ? MZ_TRUE
             : MZ_FALSE;
}
// Populate pStat with the metadata held in file_index's central directory
// record (sizes, CRC, flags, timestamps, filename and comment). Returns
// MZ_FALSE when the index is out of range or pStat is NULL.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint len;
  const mz_uint8 *pCdh = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!pCdh) || (!pStat))
    return MZ_FALSE;
  // Unpack the fixed-size portion of the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by =
      MZ_READ_LE16(pCdh + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(pCdh + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(pCdh + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy out the filename, truncated to the stat structure's capacity.
  len = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  len = MZ_MIN(len, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, pCdh + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, len);
  pStat->m_filename[len] = '\0';
  // The comment trails the filename and extra field within the record.
  len = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  len = MZ_MIN(len, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = len;
  memcpy(pStat->m_comment,
         pCdh + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(pCdh + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(pCdh + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         len);
  pStat->m_comment[len] = '\0';
  return MZ_TRUE;
}
// Copy the entry's filename (truncated and NUL-terminated) into pFilename.
// Returns the number of bytes needed to hold the name plus its terminator,
// or 0 for an invalid index.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size) {
  mz_uint name_len;
  const mz_uint8 *pCdh = mz_zip_reader_get_cdh(pZip, file_index);
  if (!pCdh) {
    if (filename_buf_size)
      pFilename[0] = '\0';
    return 0;
  }
  name_len = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    name_len = MZ_MIN(name_len, filename_buf_size - 1);
    memcpy(pFilename, pCdh + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, name_len);
    pFilename[name_len] = '\0';
  }
  return name_len + 1;
}
// Compare two length-delimited strings; case-sensitive only when the flag
// MZ_ZIP_FLAG_CASE_SENSITIVE is set, case-folded otherwise.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_string_equal(const char *pA, const char *pB, mz_uint len,
                           mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
    return memcmp(pA, pB, len) == 0;
  for (i = 0; i < len; ++i) {
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i]))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Three-way, case-insensitive comparison between a central-directory entry's
// filename (selected via l_index) and a caller-supplied name pR of length
// r_len. Returns <0, 0, or >0 like strcmp; used by the binary search.
static MZ_FORCEINLINE int
mz_zip_reader_filename_compare(const mz_zip_array *pCentral_dir_array,
                               const mz_zip_array *pCentral_dir_offsets,
                               mz_uint l_index, const char *pR, mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // The stored name begins immediately after the fixed-size header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  // Advance until the first case-folded mismatch or shorter name runs out.
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // Identical prefix: order by length; otherwise by the first differing
  // case-folded character.
  return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
// Binary-search the sorted index (built by
// mz_zip_reader_sort_central_dir_offsets_by_filename, case-folded order) for
// pFilename. Returns the matching entry's file index, or -1 if not found.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices =
      &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32,
                            0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  // Standard inclusive-bounds binary search; total_files fits in 16 bits so
  // (l + h) cannot overflow int.
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp =
            mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
                                           file_index, pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}
// Find an entry by name (and optionally by comment). Uses the sorted-index
// binary search when the default matching semantics apply (case-insensitive,
// full path, no comment filter); otherwise performs a linear scan over the
// central directory. Returns the file index, or -1 if no entry matches.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: requires the sorted index (present unless sorting was
  // disabled at init) and the default flag/comment combination.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  // Zip name/comment lengths are 16-bit fields, so longer queries can never
  // match any entry.
  if (name_len > 0xFFFF)
    return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF)
    return -1;
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader =
        &MZ_ZIP_ARRAY_ELEMENT(
            &pZip->m_pState->m_central_dir, mz_uint8,
            MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets,
                                 mz_uint32, file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len)
      continue;
    if (comment_len) {
      // The comment trails the filename and extra data inside the record.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip directory components so only the basename is compared.
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}
// Extract one entry into pBuf, which must hold the entry's uncompressed size
// (or compressed size when MZ_ZIP_FLAG_COMPRESSED_DATA is set). An optional
// caller-supplied scratch buffer avoids a temporary heap allocation when the
// archive is file-backed. Returns MZ_TRUE only when the data was produced in
// full and (for decompressed output) its CRC32 matched the headers.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining,
      out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;
  if ((buf_size) && (!pBuf))
    return MZ_FALSE;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size)
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // The payload starts after the local header's own filename/extra fields,
  // whose lengths may differ from the central directory's copies.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }
  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);
  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size)
      return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  // Inflate loop: refill the input window as needed and decompress straight
  // into the caller's buffer (non-wrapping output mode).
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // Only free the read buffer if this function allocated it.
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  return status == TINFL_STATUS_DONE;
}
// Name-based wrapper around mz_zip_reader_extract_to_mem_no_alloc().
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  const int file_index =
      mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, (mz_uint)file_index, pBuf,
                                               buf_size, flags, pUser_read_buf,
                                               user_read_buf_size);
}
// Convenience wrapper: extract with an internally allocated read buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, NULL, 0);
}
// Convenience wrapper: locate by name, then extract with an internal buffer.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}
// Extract an entry into a freshly allocated buffer sized from its central
// directory record. On success returns the buffer (free it with the
// archive's m_pFree callback) and stores its size in *pSize; returns NULL on
// any failure.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *pCdh = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;
  if (pSize)
    *pSize = 0;
  if (!pCdh)
    return NULL;
  comp_size = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
  // On 32-bit platforms the allocation must fit in a size_t.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size);
  if (!pBuf)
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize)
    *pSize = (size_t)alloc_size;
  return pBuf;
}
// Locate pFilename in the archive and extract it to a heap buffer; see
// mz_zip_reader_extract_to_heap() for ownership and sizing semantics.
// Returns NULL on failure.
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize)
      *pSize = 0;
    // Bug fix: this function returns void *, so fail with NULL rather than
    // the integer constant MZ_FALSE.
    return NULL;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
// Stream a single entry's data to pCallback in chunks, so no full-size
// output buffer is needed. Deflated entries are inflated through a
// TINFL_LZ_DICT_SIZE circular window; stored entries (or raw compressed data
// when MZ_ZIP_FLAG_COMPRESSED_DATA is set) are forwarded as read. Returns
// MZ_TRUE only if everything was delivered and, for decompressed output, the
// CRC32 matched the central directory's.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
            out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // Skip the local header's filename/extra fields to reach the payload.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
      // Memory-backed archive: deliver the whole payload in one callback.
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      // File-backed archive: copy through the read buffer chunk by chunk.
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    tinfl_decompressor inflator;
    tinfl_init(&inflator);
    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      // Inflate into a circular dictionary-sized window, flushing each
      // produced span to the callback before the window wraps.
      do {
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
            (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
            comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Reject output exceeding the size the headers promised.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }
  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // For memory-backed archives pRead_buf points into the caller's memory.
  if (!pZip->m_pState->m_pMem)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
  return status == TINFL_STATUS_DONE;
}
// Looks up pFilename in the central directory and streams its decompressed
// contents to pCallback. Returns MZ_FALSE if the name cannot be located or
// extraction fails.
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int idx = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (idx < 0) ? MZ_FALSE
                   : mz_zip_reader_extract_to_callback(pZip, idx, pCallback,
                                                       pOpaque, flags);
}
#ifndef MINIZ_NO_STDIO
// Write callback used by mz_zip_reader_extract_to_file(): appends n bytes to
// the FILE* carried in pOpaque. The absolute offset is unused because the
// extractor emits data strictly sequentially.
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  MZ_FILE *pDst = (MZ_FILE *)pOpaque;
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, pDst);
}
// Extracts the archive entry file_index to the file pDst_filename.
// Fix: the original left a truncated/corrupt destination file on disk when
// extraction (or fclose) failed; we now delete the partial output so callers
// never see a half-written file.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename, "wb");
  if (!pFile)
    return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  // fclose flushes buffered data; a failure here means the file is incomplete.
  if (MZ_FCLOSE(pFile) == EOF)
    status = MZ_FALSE;
  if (!status) {
    // Don't leave a partially-written file behind on failure.
    remove(pDst_filename);
    return MZ_FALSE;
  }
#ifndef MINIZ_NO_TIME
  // Propagate the archive's stored timestamp to the extracted file.
  mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO
// Releases all resources owned by an archive that was opened for reading and
// returns it to the MZ_ZIP_MODE_INVALID state. Returns MZ_FALSE if the
// archive is not in reading mode or its callbacks are missing.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // Detach the state first so the archive is never left pointing at freed
  // memory, then tear down each piece.
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif // #ifndef MINIZ_NO_STDIO
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Convenience wrapper: extracts the entry named pArchive_filename to the file
// pDst_filename. Returns MZ_FALSE if the entry is not found.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int idx = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  return (idx < 0)
             ? MZ_FALSE
             : mz_zip_reader_extract_to_file(pZip, idx, pDst_filename, flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Stores a 16-bit value at p in little-endian byte order, regardless of host
// endianness.
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)(v & 0xFF);
  p[1] = (mz_uint8)((v >> 8) & 0xFF);
}
// Stores a 32-bit value at p in little-endian byte order, regardless of host
// endianness.
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  int i;
  for (i = 0; i < 4; i++)
    p[i] = (mz_uint8)(v >> (i * 8));
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
// Initializes an archive object for writing. The caller must have set
// m_pWrite (and optionally the allocator callbacks) beforehand; existing_size
// is the number of bytes already present at the start of the output.
// Returns MZ_FALSE on bad parameters or allocation failure.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  // A user-specified file offset alignment must be a power of 2.
  if ((pZip->m_file_offset_alignment) &&
      (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1)))
    return MZ_FALSE;
  // Fall back to the default heap allocator for any callback not supplied.
  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  pState = (mz_zip_internal_state *)pZip->m_pAlloc(
      pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state));
  if (!pState)
    return MZ_FALSE;
  memset(pState, 0, sizeof(mz_zip_internal_state));
  pZip->m_pState = pState;
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir, sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// Write callback for heap-backed archives: copies n bytes to offset file_ofs
// inside a growable memory block owned by the internal state. Returns the
// number of bytes written (n) on success, 0 on failure.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  // The block must cover at least up to the end of this write.
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
  // The leading "(0, ...)" comma expression silences MSVC's
  // constant-conditional warning on the sizeof comparison.
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  // Reject zero-length writes and, on 32-bit size_t targets, growth beyond
  // 2 GB (the block could not be addressed as a size_t).
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    // Grow geometrically (doubling, minimum 64 bytes) to amortize reallocs.
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}
// Initializes a writer that builds the archive in a heap block (see
// mz_zip_heap_write_func). initial_allocation_size is a capacity hint; the
// reservation at the beginning always fits in the initial block.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  size_t alloc_size;
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  alloc_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning);
  if (alloc_size) {
    pZip->m_pState->m_pMem =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, alloc_size);
    if (!pZip->m_pState->m_pMem) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = alloc_size;
  }
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Write callback for stdio-backed archives: seeks to file_ofs (only when the
// stream is not already there) and writes n bytes. Returns the number of
// bytes written, or 0 on a bad offset or seek failure.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if ((mz_int64)file_ofs < 0)
    return 0;
  if ((cur_ofs != (mz_int64)file_ofs) &&
      (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Initializes a writer that streams the archive to the file pFilename,
// optionally reserving size_to_reserve_at_beginning zero bytes at the front
// (e.g. for a self-extractor stub). On any failure the writer is torn down
// and MZ_FALSE is returned.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pFilename, "wb");
  if (pFile == NULL) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    mz_uint64 ofs = 0;
    char zeros[4096];
    MZ_CLEAR_OBJ(zeros);
    // Fill the reserved region with zeros, one buffer at a time.
    while (size_to_reserve_at_beginning) {
      size_t chunk =
          (size_t)MZ_MIN(sizeof(zeros), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, ofs, zeros, chunk) != chunk) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      ofs += chunk;
      size_to_reserve_at_beginning -= chunk;
    }
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Converts an archive currently opened for reading into writing mode so new
// entries can be appended in place. pFilename is required only when the
// archive is backed by a stdio file (it is reopened writable). New entries
// overwrite the old central directory, which is rewritten on finalize.
// Fix: the MINIZ_NO_STDIO branch used a bare `pFilename;` expression
// statement to mark the parameter as used, which has no effect and triggers
// compiler warnings; replaced with the idiomatic `(void)pFilename;`.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the supported
  // max size (no zip64 support: 0xFFFF entries / 4 GB).
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    (void)pFilename; // Unused in this configuration.
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    if (!pFilename)
      return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location (the old central directory is discarded).
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}
// Convenience wrapper around mz_zip_writer_add_mem_ex(): adds a buffer with
// no comment and no caller-supplied uncompressed size/CRC.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size,
                                  NULL, 0, level_and_flags, 0, 0);
}
// Bookkeeping handed to mz_zip_writer_add_put_buf_callback() while the
// deflate compressor streams output directly into the archive.
typedef struct {
  mz_zip_archive *m_pZip;
  mz_uint64 m_cur_archive_file_ofs; // Next absolute write offset in the archive.
  mz_uint64 m_comp_size;            // Total compressed bytes emitted so far.
} mz_zip_writer_add_state;
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
void *pUser) {
mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
pState->m_cur_archive_file_ofs, pBuf,
len) != len)
return MZ_FALSE;
pState->m_cur_archive_file_ofs += len;
pState->m_comp_size += len;
return MZ_TRUE;
}
// Serializes a ZIP local directory header into pDst (which must hold
// MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes). All multi-byte fields are written
// little-endian per the ZIP specification. Always returns MZ_TRUE.
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // "Version needed to extract": 2.0 when deflated, 0 when stored.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}
// Serializes a ZIP central directory file header into pDst (which must hold
// MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes), little-endian per the ZIP
// specification. Always returns MZ_TRUE.
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // "Version needed to extract": 2.0 when deflated, 0 when stored.
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}
// Appends one entry (header + filename + extra + comment) to the in-memory
// central directory and records its starting offset in
// m_central_dir_offsets. On failure the central directory is rolled back to
// its previous size so the archive state stays consistent.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  // Push header, then the three variable-length fields, then the entry's
  // offset; the ordering matches the ZIP central directory layout.
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Basic ZIP archive filename validity checks: reject absolute paths (leading
// '/'), drive letters (':'), and DOS-style backslash separators.
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  const char *p = pArchive_name;
  if (*p == '/')
    return MZ_FALSE;
  for (; *p; p++) {
    if ((*p == '\\') || (*p == ':'))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Returns how many padding bytes must be inserted so the next file's local
// header starts on the configured alignment boundary (0 when alignment is
// disabled). m_file_offset_alignment is guaranteed a power of 2 by init.
static mz_uint
mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_archive *pZip) {
  mz_uint32 mask, rem;
  if (!pZip->m_file_offset_alignment)
    return 0;
  mask = (mz_uint32)(pZip->m_file_offset_alignment - 1);
  rem = (mz_uint32)(pZip->m_archive_size & mask);
  return (pZip->m_file_offset_alignment - rem) & mask;
}
// Writes n zero bytes to the archive starting at cur_file_ofs, buffering in
// 4 KB chunks. Returns MZ_FALSE on a short write.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char buf[4096];
  mz_uint64 ofs = cur_file_ofs;
  mz_uint32 remaining = n;
  memset(buf, 0, MZ_MIN(sizeof(buf), remaining));
  while (remaining) {
    mz_uint32 chunk = MZ_MIN(sizeof(buf), remaining);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, ofs, buf, chunk) != chunk)
      return MZ_FALSE;
    ofs += chunk;
    remaining -= chunk;
  }
  return MZ_TRUE;
}
// Adds a memory buffer to the archive as a new entry.
//  - pArchive_name: stored name (no leading '/', no '\\' or ':'); a trailing
//    '/' marks a directory entry, which must carry no data.
//  - level_and_flags: low 4 bits = compression level (0 = store);
//    MZ_ZIP_FLAG_COMPRESSED_DATA means pBuf is already deflated and the
//    caller supplies uncomp_size/uncomp_crc32.
// Layout written: [alignment padding][local header][name][data]. The local
// header is zero-filled first and rewritten at the end, once the compressed
// size and CRC are known. Returns MZ_FALSE on any parameter, size-limit,
// allocation, or I/O failure.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  // Negative level_and_flags selects the default compression level.
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
  // --- Parameter validation ---
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // uncomp_size is only meaningful for pre-compressed input.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  // Stamp the entry with the current time in DOS format.
  {
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif // #ifndef MINIZ_NO_TIME
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size))
      return MZ_FALSE;
  }
  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(pZip, &pState->m_central_dir,
                                 MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                                     archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;
  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }
  // --- Reserve space: padding + placeholder local header ---
  if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs,
                                 num_alignment_padding_bytes +
                                     sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  // --- Write the filename ---
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    // Compute CRC/size ourselves; tiny payloads are always stored since
    // deflate cannot shrink them.
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }
  // --- Write the file data (stored or deflated) ---
  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    // Pre-compressed input is recorded as deflated even though we copy it.
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
      method = MZ_DEFLATED;
  } else if (buf_size) {
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    // -15 window bits: raw deflate stream, no zlib header, as ZIP requires.
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // --- Rewrite the local header with the now-known sizes and CRC ---
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Adds the contents of disk file pSrc_filename to the archive under
// pArchive_name, streaming through a fixed-size I/O buffer so arbitrarily
// large (sub-4GB) files never need to fit in memory. The entry's timestamp is
// taken from the source file's modification time. As in add_mem_ex, a
// zero-filled local header placeholder is written first and rewritten once
// the compressed size and CRC are known. Returns MZ_FALSE on any failure;
// the source FILE* is always closed before returning.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;
  // Negative level_and_flags selects the default compression level.
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  // --- Parameter validation ---
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  // Pre-compressed input makes no sense when reading from a source file.
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  // Use the source file's mtime for the DOS timestamp fields.
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;
  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file)
    return MZ_FALSE;
  // Determine the source size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  // Tiny payloads are always stored; deflate cannot shrink them.
  if (uncomp_size <= 3)
    level = 0;
  // --- Reserve space: padding + placeholder local header ---
  if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs,
                                 num_alignment_padding_bytes +
                                     sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  // --- Write the filename ---
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }
    if (!level) {
      // Store: copy buffer-sized chunks straight through, CRCing as we go.
      while (uncomp_remaining) {
        mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
                            n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // Deflate: feed chunks through tdefl, which streams compressed output
      // into the archive via mz_zip_writer_add_put_buf_callback.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;
      // -15 window bits: raw deflate stream, no zlib header, as ZIP requires.
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      for (;;) {
        size_t in_buf_size =
            (mz_uint32)MZ_MIN(uncomp_remaining, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;
        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;
        // TDEFL_FINISH on the last chunk flushes the stream to completion.
        status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size,
                                       uncomp_remaining ? TDEFL_NO_FLUSH
                                                        : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        } else if (status != TDEFL_STATUS_OKAY)
          break;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
      method = MZ_DEFLATED;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }
  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // --- Rewrite the local header with the now-known sizes and CRC ---
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Copies entry file_index from a reader archive into this writer archive
// without recompressing: the local header, name/extra data, raw compressed
// payload, and (if bit 3 of the entry's flags is set) the trailing data
// descriptor are transferred verbatim, then the source's central directory
// record is duplicated with an updated local-header offset.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes;
  mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  // Backing storage for the local header, aligned for mz_uint32 access.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  if (NULL ==
      (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;
  // Read and validate the source entry's local header.
  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Pad the destination so the copied local header lands aligned.
  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Bytes to copy = filename + extra field + the raw compressed payload.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  // Buffer must also be big enough for a possible 16-byte data descriptor.
  if (NULL ==
      (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                             (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                                            MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                                   comp_bytes_remaining)))))
    return MZ_FALSE;
  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;
    comp_bytes_remaining -= n;
  }
  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    // The descriptor's leading signature (0x08074b50) is optional: copy 4
    // dwords when present, otherwise 3.
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF)
    return MZ_FALSE;
  // Duplicate the source central directory record, patching only the
  // local-header offset to the entry's new location.
  orig_central_dir_size = pState->m_central_dir.m_size;
  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;
  // Also copy the record's variable-length tail: name + extra + comment.
  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  if (pState->m_central_dir.m_size > 0xFFFFFFFF)
    return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  pZip->m_total_files++;
  pZip->m_archive_size = cur_dst_file_ofs;
  return MZ_TRUE;
}
// Completes the archive: writes the accumulated central directory followed by
// the end-of-central-directory record, then moves the writer into the
// WRITING_HAS_BEEN_FINALIZED state (after which only mz_zip_writer_end /
// finalize_heap_archive are valid).
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // no zip64 support yet
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }
  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  // Flush stdio-backed archives so the data actually reaches the OS.
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
    return MZ_FALSE;
#endif // #ifndef MINIZ_NO_STDIO
  pZip->m_archive_size += sizeof(hdr);
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}
// Finalizes a heap-backed archive and transfers the in-memory buffer to
// the caller: *pBuf/*pSize receive the archive bytes and the internal
// state relinquishes ownership (caller must free the buffer).
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize) ||
      (pZip->m_pWrite != mz_zip_heap_write_func)) {
    return MZ_FALSE;
  }
  if (!mz_zip_writer_finalize_archive(pZip)) {
    return MZ_FALSE;
  }
  pState = pZip->m_pState;
  // Hand the buffer over and forget it so mz_zip_writer_end() won't free it.
  *pBuf = pState->m_pMem;
  *pSize = pState->m_mem_size;
  pState->m_pMem = NULL;
  pState->m_mem_size = 0;
  pState->m_mem_capacity = 0;
  return MZ_TRUE;
}
// Tears down a writer-mode archive: releases the central directory arrays,
// closes any backing FILE, frees any heap buffer still owned by the state,
// and invalidates the archive. Valid in WRITING or finalized state only.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_bool result = MZ_TRUE;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
       (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) {
    return MZ_FALSE;
  }
  // Detach the state first so the archive can't be used mid-teardown.
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif // #ifndef MINIZ_NO_STDIO
  // A heap archive that was never handed off still owns its buffer.
  if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
    pState->m_pMem = NULL;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return result;
}
#ifndef MINIZ_NO_STDIO
// Convenience helper: adds one in-memory buffer to a zip file on disk,
// creating the file when it does not exist, or appending (rewriting the
// central directory in place) when it does. Returns MZ_FALSE on any
// failure; a newly created archive is deleted if the add fails.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags) {
  mz_bool status, created_new_archive = MZ_FALSE;
  mz_zip_archive zip_archive;
  struct MZ_FILE_STAT_STRUCT file_stat;
  MZ_CLEAR_OBJ(zip_archive);
  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
      ((comment_size) && (!pComment)) ||
      ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;
  // stat() failure is taken to mean "file does not exist yet".
  if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
    // Create a new archive.
    if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
      return MZ_FALSE;
    created_new_archive = MZ_TRUE;
  } else {
    // Append to an existing archive.
    if (!mz_zip_reader_init_file(&zip_archive, pZip_filename,
                                 level_and_flags |
                                     MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
      return MZ_FALSE;
    if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
      mz_zip_reader_end(&zip_archive);
      return MZ_FALSE;
    }
  }
  status =
      mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
                               pComment, comment_size, level_and_flags, 0, 0);
  // Always finalize, even if adding failed for some reason, so we have a valid
  // central directory. (This may not always succeed, but we can try.)
  if (!mz_zip_writer_finalize_archive(&zip_archive))
    status = MZ_FALSE;
  if (!mz_zip_writer_end(&zip_archive))
    status = MZ_FALSE;
  if ((!status) && (created_new_archive)) {
    // It's a new archive and something went wrong, so just delete it.
    int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
    (void)ignoredStatus;
  }
  return status;
}
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize)
*pSize = 0;
if ((!pZip_filename) || (!pArchive_name))
return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(&zip_archive, pZip_filename,
flags |
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
}
// Runtime endianness probe: returns true when the most significant byte
// of a multi-byte integer is stored first in memory (big-endian host).
bool IsBigEndian(void) {
  const unsigned int probe = 0x01020304;
  const unsigned char *bytes = (const unsigned char *)&probe;
  return bytes[0] == 0x01;
}
// Reverses the two bytes of a 16-bit value in place (endian conversion).
void swap2(unsigned short *val) {
  const unsigned short v = *val;
  *val = (unsigned short)((v >> 8) | (v << 8));
}
// Reverses the four bytes of a 32-bit value in place (endian conversion).
void swap4(unsigned int *val) {
  const unsigned int v = *val;
  *val = ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) |
         ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u);
}
// Reverses the eight bytes of a 64-bit value in place (endian conversion),
// done as three mask-and-shift passes instead of a per-byte copy.
void swap8(unsigned long long *val) {
  unsigned long long v = *val;
  v = ((v & 0x00000000ffffffffULL) << 32) | ((v & 0xffffffff00000000ULL) >> 32);
  v = ((v & 0x0000ffff0000ffffULL) << 16) | ((v & 0xffff0000ffff0000ULL) >> 16);
  v = ((v & 0x00ff00ff00ff00ffULL) << 8) | ((v & 0xff00ff00ff00ff00ULL) >> 8);
  *val = v;
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
// IEEE-754 binary32 viewed three ways: raw bits, float value, and sign/
// exponent/mantissa bit-fields. Field order flips with host endianness so
// the named fields line up with the same physical bits either way.
// NOTE(review): type-punning through a union is the intended idiom here
// (it matches the original gist this is derived from).
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};
// IEEE-754 binary16 (half float): raw 16-bit pattern plus sign/exponent/
// mantissa bit-fields, endianness-ordered to match FP32 above.
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};
// Converts a half-precision value to single precision via bit manipulation
// (from https://gist.github.com/rygorous/2156668). Handles Inf/NaN and
// denormals explicitly; the magic constant renormalizes denormal inputs.
FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23}; // 2^-14 as a float, for denormals
  static const unsigned int shifted_exp = 0x7c00
                                          << 13; // exponent mask after shift
  FP32 o;
  o.u = (h.u & 0x7fff) << 13;            // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u; // just the exponent
  o.u += (127 - 15) << 23;               // exponent adjust (rebias 15 -> 127)
  // handle exponent special cases
  if (exp_ == shifted_exp)   // Inf/NaN?
    o.u += (128 - 16) << 23; // extra exp adjust
  else if (exp_ == 0)        // Zero/Denormal?
  {
    o.u += 1 << 23;   // extra exp adjust
    o.f -= magic.f;   // renormalize
  }
  o.u |= (h.u & 0x8000) << 16; // sign bit
  return o;
}
// Converts a single-precision value to half precision with round-to-nearest,
// handling zero/denormal, Inf/NaN, overflow and underflow cases explicitly.
FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};
  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf
  } else                                     // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31) // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0) // Underflow
    {
      if ((14 - newexp) <= 24) // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1) // Check for rounding
          o.u++; // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = newexp;
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000) // Check for rounding
        o.u++;                   // Round, might overflow to inf, this is OK
    }
  }
  o.s.Sign = f.s.Sign;
  return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
// Copies the NUL-terminated string starting at `ptr` into `s` and returns
// a pointer to the first byte past the terminating '\0'.
// NOTE(review): assumes the buffer actually contains a terminator — the
// scan is unbounded, exactly like the original.
const char *ReadString(std::string &s, const char *ptr) {
  const char *end = ptr;
  while (*end != '\0') {
    end++;
  }
  s.assign(ptr, end);
  return end + 1; // skip '\0'
}
// Parses one EXR header attribute at `ptr`: a NUL-terminated name, a
// NUL-terminated type string, a 4-byte little-endian payload length, then
// that many payload bytes copied into `data`.
// Returns a pointer just past the attribute, or NULL at the end of the
// attribute list (leading 0 byte) or when the stored length is invalid.
const char *ReadAttribute(std::string &name, std::string &ty,
                          std::vector<unsigned char> &data, const char *ptr) {
  if ((*ptr) == 0) {
    // end of attribute.
    return NULL;
  }
  const char *p = ReadString(name, ptr);
  p = ReadString(ty, p);
  int dataLen;
  memcpy(&dataLen, p, sizeof(int));
  p += 4;
  if (IsBigEndian()) {
    swap4(reinterpret_cast<unsigned int *>(&dataLen));
  }
  // Reject corrupt input: a negative length would make the resize/memcpy
  // below invoke undefined behavior.
  if (dataLen < 0) {
    return NULL;
  }
  data.resize(static_cast<size_t>(dataLen));
  if (dataLen > 0) {
    // Guarded so a zero-length attribute doesn't hit data.at(0) on an
    // empty vector (which throws std::out_of_range).
    memcpy(&data.at(0), p, static_cast<size_t>(dataLen));
  }
  p += dataLen;
  return p;
}
// Writes one EXR header attribute to `fp`: NUL-terminated name, NUL-
// terminated type, 4-byte little-endian length, then `len` payload bytes.
// NOTE(review): write errors are only caught by assert(), which compiles
// out under NDEBUG — short writes are silently ignored in release builds.
void WriteAttribute(FILE *fp, const char *name, const char *type,
                    const unsigned char *data, int len) {
  size_t n = fwrite(name, 1, strlen(name) + 1, fp);
  assert(n == strlen(name) + 1);
  n = fwrite(type, 1, strlen(type) + 1, fp);
  assert(n == strlen(type) + 1);
  int outLen = len;
  // The file format is little-endian; swap on big-endian hosts.
  if (IsBigEndian()) {
    swap4(reinterpret_cast<unsigned int *>(&outLen));
  }
  n = fwrite(&outLen, 1, sizeof(int), fp);
  assert(n == sizeof(int));
  n = fwrite(data, 1, len, fp);
  assert(n == (size_t)len);
  (void)n;
}
// Appends one serialized EXR attribute to `out`: NUL-terminated name,
// NUL-terminated type, 4-byte little-endian length, then `len` data bytes.
void WriteAttributeToMemory(std::vector<unsigned char> &out, const char *name,
                            const char *type, const unsigned char *data,
                            int len) {
  const size_t nameBytes = strlen(name) + 1; // include the '\0'
  const size_t typeBytes = strlen(type) + 1;
  out.insert(out.end(), name, name + nameBytes);
  out.insert(out.end(), type, type + typeBytes);
  int outLen = len;
  if (IsBigEndian()) {
    swap4(reinterpret_cast<unsigned int *>(&outLen));
  }
  const unsigned char *lenBytes = reinterpret_cast<unsigned char *>(&outLen);
  out.insert(out.end(), lenBytes, lenBytes + sizeof(int));
  out.insert(out.end(), data, data + len);
}
// One entry of an EXR "chlist" channel record, mirroring the on-disk
// layout read/written by ReadChannelInfo / WriteChannelInfo below.
typedef struct {
  std::string name;     // less than 255 bytes long
  int pixelType;        // pixel data type tag as stored in the file
  unsigned char pLinear; // single-byte flag from the channel record
  int xSampling;        // per-channel x sampling value from the file
  int ySampling;        // per-channel y sampling value from the file
} ChannelInfo;
// Parses a serialized EXR channel list out of `data` into `channels`.
// Each record is: name '\0', int pixelType, uchar pLinear + 3 reserved
// bytes, int xSampling, int ySampling; the list ends at a 0 byte.
// NOTE(review): no bounds checking against data.size() — assumes the
// buffer is well-formed and terminated.
void ReadChannelInfo(std::vector<ChannelInfo> &channels,
                     const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));
  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;
    p = ReadString(info.name, p);
    memcpy(&info.pixelType, p, sizeof(int));
    p += 4;
    info.pLinear = p[0];                     // uchar
    p += 1 + 3;                              // reserved: uchar[3]
    memcpy(&info.xSampling, p, sizeof(int)); // int
    p += 4;
    memcpy(&info.ySampling, p, sizeof(int)); // int
    p += 4;
    // File data is little-endian; swap the ints on big-endian hosts.
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&info.pixelType));
      swap4(reinterpret_cast<unsigned int *>(&info.xSampling));
      swap4(reinterpret_cast<unsigned int *>(&info.ySampling));
    }
    channels.push_back(info);
  }
}
// Serializes `channels` into the EXR chlist wire format in `data`:
// per channel a name + '\0', int pixelType, pLinear byte + 3 reserved
// bytes, int xSampling, int ySampling; the list is terminated by a 0 byte.
void WriteChannelInfo(std::vector<unsigned char> &data,
                      const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;
  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0
    sz += 16;                                   // 4 * int
  }
  // +1 for the terminating 0 byte; resize() value-initializes, so the
  // 3 reserved bytes skipped below are already zero.
  data.resize(sz + 1);
  unsigned char *p = &data.at(0);
  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;
    int pixelType = channels[c].pixelType;
    int xSampling = channels[c].xSampling;
    int ySampling = channels[c].ySampling;
    // Output is little-endian; swap on big-endian hosts.
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&pixelType));
      swap4(reinterpret_cast<unsigned int *>(&xSampling));
      swap4(reinterpret_cast<unsigned int *>(&ySampling));
    }
    memcpy(p, &pixelType, sizeof(int));
    p += sizeof(int);
    (*p) = channels[c].pLinear;
    p += 4; // 1 byte pLinear + 3 reserved (left zeroed)
    memcpy(p, &xSampling, sizeof(int));
    p += sizeof(int);
    memcpy(p, &ySampling, sizeof(int));
    p += sizeof(int);
  }
  (*p) = '\0';
}
// EXR ZIP-compresses srcSize bytes from `src` into `dst` (which must hold
// at least mz_compressBound(srcSize) bytes); `compressedSize` receives the
// actual output size. Applies OpenEXR's interleave + delta preprocessing
// before deflating with miniz.
void CompressZip(unsigned char *dst, unsigned long long &compressedSize,
                 const unsigned char *src, unsigned long srcSize) {
  std::vector<unsigned char> tmpBuf(srcSize);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  //
  // Reorder the pixel data: even bytes into the first half of tmpBuf,
  // odd bytes into the second half.
  //
  {
    char *t1 = (char *)&tmpBuf.at(0);
    char *t2 = (char *)&tmpBuf.at(0) + (srcSize + 1) / 2;
    const char *stop = (const char *)src + srcSize;
    while (true) {
      if ((const char *)src < stop)
        *(t1++) = *(src++);
      else
        break;
      if ((const char *)src < stop)
        *(t2++) = *(src++);
      else
        break;
    }
  }
  //
  // Predictor: replace each byte (after the first) with a biased delta
  // from its predecessor; the store truncates to 8 bits intentionally.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + srcSize;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = d;
      ++t;
    }
  }
  //
  // Compress the data using miniz
  //
  miniz::mz_ulong outSize = miniz::mz_compressBound(srcSize);
  int ret = miniz::mz_compress(dst, &outSize,
                               (const unsigned char *)&tmpBuf.at(0), srcSize);
  assert(ret == miniz::MZ_OK);
  (void)ret;
  compressedSize = outSize;
}
// Inverse of CompressZip: inflates `src` with miniz, undoes the delta
// predictor, then de-interleaves the two halves back into `dst`.
// `uncompressedSize` is in/out: expected size on entry, actual size on
// return (updated by mz_uncompress).
void DecompressZip(unsigned char *dst, unsigned long &uncompressedSize,
                   const unsigned char *src, unsigned long srcSize) {
  std::vector<unsigned char> tmpBuf(uncompressedSize);
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), &uncompressedSize, src, srcSize);
  assert(ret == miniz::MZ_OK);
  (void)ret;
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  // Predictor: each byte stores a biased delta; reconstruct running values.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressedSize;
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = d;
      ++t;
    }
  }
  // Reorder the pixel data: interleave the two halves of tmpBuf back
  // into alternating bytes of dst.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressedSize + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressedSize;
    while (true) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
}
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel working state for PIZ (de)compression (after OpenEXR's
// ImfPizCompressor.cpp).
struct PIZChannelData {
  unsigned short *start; // first sample of this channel in the work buffer
  unsigned short *end;   // running cursor / one-past-last sample
  int nx;                // sample count in x
  int ny;                // sample count in y
  int ys;                // y sampling value
  int size;              // samples per pixel for this channel
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
#if 0 // @todo
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
short as = a;
short bs = b;
short ms = (as + bs) >> 1;
short ds = as - bs;
l = ms;
h = ds;
}
#endif
// Inverse of the 14-bit Haar wavelet step (wenc14): reconstructs the
// original sample pair (a, b) from the low-pass average l and the
// high-pass difference h. Inputs are reinterpreted as signed 16-bit.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int low = (short)l;
  const int diff = (short)h;
  // first = low + ceil(diff / 2), matching the encoder's floor average.
  const int first = low + (diff & 1) + (diff >> 1);
  a = (unsigned short)((short)first);
  b = (unsigned short)((short)(first - diff));
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
// Parameters for the full 16-bit (modulo-arithmetic) wavelet variant.
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1); // bias applied to 'a' in wenc16/wdec16
// const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1; // reduces results mod 2^16
#if 0 // @todo
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
if (d < 0)
m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = m;
h = d;
}
#endif
// Inverse of the full 16-bit modulo-arithmetic Haar wavelet step (wenc16):
// recovers the sample pair (a, b) from average m and difference d, with
// all arithmetic reduced mod 2^16 and the A_OFFSET bias removed.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int avg = l;
  const int diff = h;
  const int second = (avg - (diff >> 1)) & MOD_MASK;
  const int first = (diff + second - A_OFFSET) & MOD_MASK;
  b = (unsigned short)second;
  a = (unsigned short)first;
}
//
// 2D Wavelet encoding:
//
#if 0 // @todo
void wav2Encode(unsigned short *in, // io: values are transformed in place
int nx, // i : x size
int ox, // i : x offset
int ny, // i : y size
int oy, // i : y offset
unsigned short mx) // i : maximum in[x][y] value
{
bool w14 = (mx < (1 << 14));
int n = (nx > ny) ? ny : nx;
int p = 1; // == 1 << level
int p2 = 2; // == 1 << (level+1)
//
  // Hierarchical loop on smaller dimension n
//
while (p2 <= n) {
unsigned short *py = in;
unsigned short *ey = in + oy * (ny - p2);
int oy1 = oy * p;
int oy2 = oy * p2;
int ox1 = ox * p;
int ox2 = ox * p2;
unsigned short i00, i01, i10, i11;
//
// Y loop
//
for (; py <= ey; py += oy2) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
//
// X loop
//
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
unsigned short *p10 = px + oy1;
unsigned short *p11 = p10 + ox1;
//
// 2D wavelet encoding
//
if (w14) {
wenc14(*px, *p01, i00, i01);
wenc14(*p10, *p11, i10, i11);
wenc14(i00, i10, *px, *p10);
wenc14(i01, i11, *p01, *p11);
} else {
wenc16(*px, *p01, i00, i01);
wenc16(*p10, *p11, i10, i11);
wenc16(i00, i10, *px, *p10);
wenc16(i01, i11, *p01, *p11);
}
}
//
// Encode (1D) odd column (still in Y loop)
//
if (nx & p) {
unsigned short *p10 = px + oy1;
if (w14)
wenc14(*px, *p10, i00, *p10);
else
wenc16(*px, *p10, i00, *p10);
*px = i00;
}
}
//
// Encode (1D) odd line (must loop in X)
//
if (ny & p) {
unsigned short *px = py;
unsigned short *ex = py + ox * (nx - p2);
for (; px <= ex; px += ox2) {
unsigned short *p01 = px + ox1;
if (w14)
wenc14(*px, *p01, i00, *p01);
else
wenc16(*px, *p01, i00, *p01);
*px = i00;
}
}
//
// Next level
//
p = p2;
p2 <<= 1;
}
}
#endif
//
// 2D Wavelet decoding:
//
// Inverse 2D Haar wavelet transform, in place. Walks the resolution
// hierarchy from coarsest to finest level, undoing the encoder's passes
// in reverse order. Uses the exact 14-bit basis when all values fit in
// 14 bits, otherwise the modulo-arithmetic 16-bit basis.
void wav2Decode(unsigned short *in, // io: values are transformed in place
                int nx,             // i : x size
                int ox,             // i : x offset
                int ny,             // i : y size
                int oy,             // i : y offset
                unsigned short mx)  // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;
  //
  // Search max level
  //
  while (p <= n)
    p <<= 1;
  p >>= 1;
  p2 = p;
  p >>= 1;
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        // The 2x2 quad at this level: px (low/low), p01 (low/high),
        // p10 (high/low), p11 (high/high).
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet decoding
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }
      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
// Configuration of the 16-bit Huffman coder (after OpenEXR's ImfHuf).
const int HUF_ENCBITS = 16;                     // literal (value) bit length
const int HUF_DECBITS = 14;                     // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;       // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;        // mask to index the dec table
// One primary decoding-table entry. For short codes (length <= HUF_DECBITS)
// `len` is the code length and `lit` the decoded symbol; entries shared by
// long codes keep len == 0 and instead carry `p`, an array of `lit`
// candidate symbol indices (see hufBuildDecTable).
struct HufDec { // short code long code
  //-------------------------------
  int len : 8; // code length 0
  int lit : 24; // lit p size
  int *p; // 0 lits
};
// Low 6 bits of a packed table entry: the code length in bits.
inline long long hufLength(long long code) { return code & 0x3f; }
inline long long hufCode(long long code) {
  // Strip the 6-bit length field stored in the low bits of the entry.
  return code >> 6;
}
#if 0
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
char *&out) {
c <<= nBits;
lc += nBits;
c |= bits;
while (lc >= 8)
*out++ = (c >> (lc -= 8));
}
#endif
// Pulls the next nBits bits (MSB-first) from the input stream. `c` and
// `lc` form the shared bit buffer: c holds pending bits, lc counts how
// many of its low bits are valid. `in` advances as bytes are consumed.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  // Refill the accumulator a byte at a time until enough bits are queued.
  while (lc < nBits) {
    const unsigned char next = *reinterpret_cast<const unsigned char *>(in);
    in++;
    c = (c << 8) | next;
    lc += 8;
  }
  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
// Converts a table of per-symbol code lengths into packed canonical
// Huffman codes in place: on return hcode[i] holds (code << 6) | length.
void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];
  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //
  for (int i = 0; i <= 58; ++i)
    n[i] = 0;
  for (int i = 0; i < HUF_ENCSIZE; ++i)
    n[hcode[i]] += 1;
  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //
  long long c = 0;
  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }
  //
  // hcode[i] contains the length, l, of the
  // code for symbol i. Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = hcode[i];
    if (l > 0)
      hcode[i] = l | (n[l]++ << 6); // pack: code in high bits, length low 6
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
#if 0 // @todo
struct FHeapCompare {
bool operator()(long long *a, long long *b) { return *a > *b; }
};
void hufBuildEncTable(
long long *frq, // io: input frequencies [HUF_ENCSIZE], output table
int *im, // o: min frq index
int *iM) // o: max frq index
{
//
// This function assumes that when it is called, array frq
// indicates the frequency of all possible symbols in the data
// that are to be Huffman-encoded. (frq[i] contains the number
// of occurrences of symbol i in the data.)
//
// The loop below does three things:
//
// 1) Finds the minimum and maximum indices that point
// to non-zero entries in frq:
//
// frq[im] != 0, and frq[i] == 0 for all i < im
// frq[iM] != 0, and frq[i] == 0 for all i > iM
//
// 2) Fills array fHeap with pointers to all non-zero
// entries in frq.
//
// 3) Initializes array hlink such that hlink[i] == i
// for all array entries.
//
int hlink[HUF_ENCSIZE];
long long *fHeap[HUF_ENCSIZE];
*im = 0;
while (!frq[*im])
(*im)++;
int nf = 0;
for (int i = *im; i < HUF_ENCSIZE; i++) {
hlink[i] = i;
if (frq[i]) {
fHeap[nf] = &frq[i];
nf++;
*iM = i;
}
}
//
// Add a pseudo-symbol, with a frequency count of 1, to frq;
// adjust the fHeap and hlink array accordingly. Function
// hufEncode() uses the pseudo-symbol for run-length encoding.
//
(*iM)++;
frq[*iM] = 1;
fHeap[nf] = &frq[*iM];
nf++;
//
// Build an array, scode, such that scode[i] contains the number
// of bits assigned to symbol i. Conceptually this is done by
// constructing a tree whose leaves are the symbols with non-zero
// frequency:
//
// Make a heap that contains all symbols with a non-zero frequency,
// with the least frequent symbol on top.
//
// Repeat until only one symbol is left on the heap:
//
// Take the two least frequent symbols off the top of the heap.
// Create a new node that has first two nodes as children, and
// whose frequency is the sum of the frequencies of the first
// two nodes. Put the new node back into the heap.
//
// The last node left on the heap is the root of the tree. For each
// leaf node, the distance between the root and the leaf is the length
// of the code for the corresponding symbol.
//
// The loop below doesn't actually build the tree; instead we compute
// the distances of the leaves from the root on the fly. When a new
// node is added to the heap, then that node's descendants are linked
// into a single linear list that starts at the new node, and the code
// lengths of the descendants (that is, their distance from the root
// of the tree) are incremented by one.
//
std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
long long scode[HUF_ENCSIZE];
memset(scode, 0, sizeof(long long) * HUF_ENCSIZE);
while (nf > 1) {
//
// Find the indices, mm and m, of the two smallest non-zero frq
// values in fHeap, add the smallest frq to the second-smallest
// frq, and remove the smallest frq value from fHeap.
//
int mm = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
--nf;
int m = fHeap[0] - frq;
std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
frq[m] += frq[mm];
std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
//
// The entries in scode are linked into lists with the
// entries in hlink serving as "next" pointers and with
// the end of a list marked by hlink[j] == j.
//
// Traverse the lists that start at scode[m] and scode[mm].
// For each element visited, increment the length of the
// corresponding code by one bit. (If we visit scode[j]
// during the traversal, then the code for symbol j becomes
// one bit longer.)
//
// Merge the lists that start at scode[m] and scode[mm]
// into a single list that starts at scode[m].
//
//
// Add a bit to all codes in the first list.
//
for (int j = m; true; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j) {
//
// Merge the two lists.
//
hlink[j] = mm;
break;
}
}
//
// Add a bit to all codes in the second list
//
for (int j = mm; true; j = hlink[j]) {
scode[j]++;
assert(scode[j] <= 58);
if (hlink[j] == j)
break;
}
}
//
// Build a canonical Huffman code table, replacing the code
// lengths in scode with (code, code length) pairs. Copy the
// code table from scode into frq.
//
hufCanonicalCodeTable(scode);
memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE);
}
#endif
//
// Pack an encoding table:
// - only code lengths, not actual codes, are stored
// - runs of zeroes are compressed as follows:
//
// unpacked packed
// --------------------------------
// 1 zero 0 (6 bits)
// 2 zeroes 59
// 3 zeroes 60
// 4 zeroes 61
// 5 zeroes 62
// n zeroes (6 or more) 63 n-6 (6 + 8 bits)
//
// Special symbols used when (un)packing the table of code lengths; see
// the run-length scheme in the comment block above.
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
// const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
#if 0
void hufPackEncTable(const long long *hcode, // i : encoding table [HUF_ENCSIZE]
int im, // i : min hcode index
int iM, // i : max hcode index
char **pcode) // o: ptr to packed table (updated)
{
char *p = *pcode;
long long c = 0;
int lc = 0;
for (; im <= iM; im++) {
int l = hufLength(hcode[im]);
if (l == 0) {
int zerun = 1;
while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
if (hufLength(hcode[im + 1]) > 0)
break;
im++;
zerun++;
}
if (zerun >= 2) {
if (zerun >= SHORTEST_LONG_RUN) {
outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
} else {
outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
}
continue;
}
}
outputBits(6, l, c, lc, p);
}
if (lc > 0)
*p++ = (unsigned char)(c << (8 - lc));
*pcode = p;
}
#endif
//
// Unpack an encoding table packed by hufPackEncTable():
//
// Reads a run-length-packed table of code lengths from *pcode, expands the
// zero runs, and rebuilds the canonical codes into hcode. Returns false
// (instead of reading/writing out of bounds) when the packed data runs past
// `ni` bytes or a zero run would overflow the [im, iM] range.
bool hufUnpackEncTable(const char **pcode, // io: ptr to packed table (updated)
                       int ni,             // i : input size (in bytes)
                       int im,             // i : min hcode index
                       int iM,             // i : max hcode index
                       long long *hcode)   // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
  const char *p = *pcode;
  long long c = 0;
  int lc = 0;
  for (; im <= iM; im++) {
    // Bounds check before pulling more bits from the packed stream.
    if (p - *pcode > ni) {
      return false;
    }
    long long l = hcode[im] = getBits(6, c, lc, p); // code length
    if (l == (long long)LONG_ZEROCODE_RUN) {
      // Long zero run: next 8 bits give (run length - SHORTEST_LONG_RUN).
      if (p - *pcode > ni) {
        return false;
      }
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--)
        hcode[im++] = 0;
      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero run: symbol 59..62 encodes a run of 2..5 zeroes.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--)
        hcode[im++] = 0;
      im--;
    }
  }
  *pcode = const_cast<char *>(p);
  hufCanonicalCodeTable(hcode);
  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Clear a newly allocated decoding table so that it contains only zeroes.
//
// Resets all HUF_DECSIZE entries of a freshly allocated decoding table to
// the empty state: no code length, no symbol, no long-code list.
void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  for (HufDec *entry = hdecod; entry != hdecod + HUF_DECSIZE; ++entry) {
    entry->len = 0;
    entry->lit = 0;
    entry->p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
// unfrequent;
// - decoding tables are used by hufDecode();
//
// Populates the decoding hash table from the packed encoding table.
// Short codes fill every primary slot they can prefix (len/lit set);
// long codes append their symbol index to the slot's secondary `p` list.
// Returns false on a malformed table (code value wider than its length,
// or conflicting table entries).
bool hufBuildDecTable(const long long *hcode, // i : encoding table
                      int im,                 // i : min index in hcode
                      int iM,                 // i : max index in hcode
                      HufDec *hdecod)         // o: (allocated by caller)
// decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);
    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      // invalidTableEntry();
      return false;
    }
    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        // invalidTableEntry();
        return false;
      }
      pl->lit++;
      // Grow the secondary symbol list by one (simple copy-and-extend;
      // long codes are rare so this is acceptable).
      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];
        for (int i = 0; i < pl->lit - 1; ++i)
          pl->p[i] = p[i];
        delete[] p;
      } else {
        pl->p = new int[1];
      }
      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
      for (long long i = (long long)1 << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          // invalidTableEntry();
          return false;
        }
        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}
//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//
void hufFreeDecTable(HufDec *hdecod) // io: Decoding table
{
  // Release the secondary (long-code) index arrays allocated by
  // hufBuildDecTable() and null the pointers so a subsequent clear or
  // free of the table is safe.
  for (int i = 0; i < HUF_DECSIZE; i++) {
    int *indices = hdecod[i].p;
    if (indices != NULL) {
      delete[] indices;
      hdecod[i].p = 0;
    }
  }
}
//
// ENCODING
//
#if 0 // @todo
// NOTE: The Huffman *encoding* path below is disabled; only the decoding
// path is currently used by this reader.
//
// outputCode: emit one Huffman code (bits + length) into the bit stream.
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCount.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // `>= 0` is intentional: runCount counts *repeats*, so runCount + 1
    // symbols are emitted in total, matching the run-length branch above.
    while (runCount-- >= 0)
      outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//
int hufEncode // return: output size (in bits)
    (const long long *hcode,   // i : encoding table
     const unsigned short *in, // i : uncompressed input buffer
     const int ni,             // i : input buffer size (in bytes)
     int rlc,                  // i : rl code
     char *out)                // o: compressed output buffer
{
  char *outStart = out;
  long long c = 0; // bits not yet written to out
  int lc = 0;      // number of valid bits in c (LSB)
  int s = in[0];
  int cs = 0;
  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }
    s = in[i];
  }
  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  // Flush any bits still buffered in c, left-aligned in the final byte.
  if (lc)
    *out = (c << (8 - lc)) & 0xff;
  return (out - outStart) * 8 + lc;
}
#endif
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// getChar: shift the next input byte into the low end of the bit buffer
// `c` and account for the 8 newly valid bits in `lc`.
#define getChar(c, lc, in) \
  { \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8; \
  }
// getCode: emit one decoded symbol `po` into `out`.  If `po` equals the
// run-length code `rlc`, read an 8-bit repeat count from the bit buffer
// and duplicate the previously emitted symbol that many times; otherwise
// append `po`.  Expands `return false` in the enclosing function when the
// output buffer `oe` would be overrun.
//
// NOTE(review): `out[-1]` is read without verifying that at least one
// symbol has already been written, and the run-length path consumes input
// via getChar() without an end-of-input bound — both assume well-formed
// input.  Hardening would require passing the output start and input end
// into this macro (and updating all call sites in hufDecode).
#define getCode(po, rlc, c, lc, in, out, oe) \
  { \
    if (po == rlc) { \
      if (lc < 8) \
        getChar(c, lc, in); \
\
      lc -= 8; \
\
      unsigned char cs = (c >> lc); \
\
      if (out + cs > oe) \
        return false; \
\
      unsigned short s = out[-1]; \
\
      while (cs-- > 0) \
        *out++ = s; \
    } else if (out < oe) { \
      *out++ = po; \
    } else { \
      return false; \
    } \
  }
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//
bool hufDecode(const long long *hcode, // i : encoding table
               const HufDec *hdecod,   // i : decoding table
               const char *in,         // i : compressed input buffer
               int ni,                 // i : input size (in bits)
               int rlc,                // i : run-length code
               int no,                 // i : expected output size (in bytes)
               unsigned short *out)    // o: uncompressed output buffer
{
  long long c = 0; // bit buffer, filled MSB-first by getChar()
  int lc = 0;      // number of valid bits currently in c
  unsigned short *outb = out;         // start of output (for final size check)
  unsigned short *oe = out + no;      // one past the end of the output buffer
  const char *ie = in + (ni + 7) / 8; // input byte size
  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);
    //
    // Access decoding table
    //
    // Whenever at least HUF_DECBITS bits are buffered, the top
    // HUF_DECBITS bits index the primary decoding table directly.
    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        getCode(pl.lit, rlc, c, lc, in, out, oe);
      } else {
        // No short code and no long-code list: the bit pattern does not
        // correspond to any valid code.
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code
        //
        // Search long code
        //
        // Linear scan of the candidate symbols stored for this table
        // slot; compare the full l-bit code against the buffered bits.
        int j;
        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);
          while (lc < l && in < ie) // get more bits
            getChar(c, lc, in);
          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              getCode(pl.p[j], rlc, c, lc, in, out, oe);
              break;
            }
          }
        }
        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }
  //
  // Get remaining (short) codes
  //
  // ni is the exact bit count, so ((8 - ni) & 7) low-order bits of the
  // final byte are padding; discard them before draining the buffer.
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;
  while (lc > 0) {
    // Left-align the remaining bits to form a primary-table index.
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
    if (pl.len) {
      lc -= pl.len;
      getCode(pl.lit, rlc, c, lc, in, out, oe);
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }
  // Fail if fewer samples were produced than the caller expected.
  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();
  return true;
}
#if 0 // @todo
// NOTE: Encoding-side helpers, currently disabled.
//
// countFrequencies: histogram of the n input symbols into freq[].
void countFrequencies(long long freq[HUF_ENCSIZE],
                      const unsigned short data[/*n*/], int n) {
  for (int i = 0; i < HUF_ENCSIZE; ++i)
    freq[i] = 0;
  for (int i = 0; i < n; ++i)
    ++freq[data[i]];
}
// writeUInt: store a 32-bit value in little-endian byte order.
void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;
  b[0] = i;
  b[1] = i >> 8;
  b[2] = i >> 16;
  b[3] = i >> 24;
}
#endif
// Read a 32-bit little-endian unsigned integer from buf.
//
// Each byte is cast to unsigned int BEFORE shifting: the original code
// shifted the promoted (signed) int, and `b[3] << 24` is undefined
// behavior when the top byte has its high bit set (shifting into the
// sign bit of int).
unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
         ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}
//
// EXTERNAL INTERFACE
//
#if 0 // @todo
// NOTE: Huffman compression entry point, currently disabled.
// Layout written: 20-byte header (im, iM, tableLength, nBits, reserved),
// followed by the packed encoding table and the encoded data.
int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) {
  if (nRaw == 0)
    return 0;
  long long freq[HUF_ENCSIZE];
  countFrequencies(freq, raw, nRaw);
  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq, &im, &iM);
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq, im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;
  int nBits = hufEncode(freq, raw, nRaw, iM, dataStart);
  int dataLength = (nBits + 7) / 8;
  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0); // room for future extensions
  return dataStart + dataLength - compressed;
}
#endif
//
// Decode a Huffman-compressed block: a 20-byte header (im, iM, table
// length, bit count, reserved), the packed encoding table, then the
// coded data.  Writes exactly nRaw values into raw[] on success.
// Returns false on malformed input.
//
bool hufUncompress(const char compressed[], int nCompressed,
                   unsigned short raw[], int nRaw) {
  if (nCompressed == 0) {
    // FIX: an empty input is valid when no output is expected (the
    // original returned false in both cases, rejecting legal data).
    return (nRaw == 0);
  }
  // FIX: the 20-byte header must actually be present before reading it.
  if (nCompressed < 20)
    return false;
  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);
  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE)
    return false;
  const char *ptr = compressed + 20;
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);
    hufClearDecTable(&hdec.at(0));
    hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                      &freq.at(0));
    // The declared bit count must fit in the remaining input bytes.
    if (nBits < 0 || nBits > 8 * (nCompressed - (ptr - compressed))) {
      return false;
    }
    // FIX: propagate table-construction and decoding failures; the
    // original ignored both results and reported success on corrupt data.
    if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
      hufFreeDecTable(&hdec.at(0));
      return false;
    }
    if (!hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw)) {
      hufFreeDecTable(&hdec.at(0));
      return false;
    }
    hufFreeDecTable(&hdec.at(0));
  }
  return true;
}
//
// Functions to compress the range of values in the pixel data
//
// Number of distinct 16-bit pixel values, and the size in bytes of a
// bitmap holding one presence bit per possible value (65536 / 8).
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);
#if 0 // @todo
// NOTE: Range-compression helpers for the *encoding* path, disabled.
//
// bitmapFromData: set one presence bit per value occurring in data[],
// and report the first/last non-zero bitmap byte indices.
void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                    unsigned char bitmap[BITMAP_SIZE],
                    unsigned short &minNonZero, unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i)
    bitmap[i] = 0;
  for (int i = 0; i < nData; ++i)
    bitmap[data[i] >> 3] |= (1 << (data[i] & 7));
  bitmap[0] &= ~1; // zero is not explicitly stored in
                   // the bitmap; we assume that the
                   // data always contain zeroes
  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;
  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i)
        minNonZero = i;
      if (maxNonZero < i)
        maxNonZero = i;
    }
  }
}
// forwardLutFromBitmap: map each present value to its dense rank.
unsigned short forwardLutFromBitmap(const unsigned char bitmap[BITMAP_SIZE],
                                    unsigned short lut[USHORT_RANGE]) {
  int k = 0;
  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
      lut[i] = k++;
    else
      lut[i] = 0;
  }
  return k - 1; // maximum value stored in lut[],
}               // i.e. number of ones in bitmap minus 1
#endif
unsigned short reverseLutFromBitmap(const unsigned char bitmap[BITMAP_SIZE],
                                    unsigned short lut[USHORT_RANGE]) {
  // Build the inverse lookup table: lut[k] is the k-th value whose
  // presence bit is set in the bitmap (value 0 is always treated as
  // present, since it is never stored explicitly).
  int count = 0;
  for (int v = 0; v < USHORT_RANGE; ++v) {
    bool present = (v == 0) || ((bitmap[v >> 3] & (1 << (v & 7))) != 0);
    if (present) {
      lut[count] = v;
      ++count;
    }
  }
  int maxIndex = count - 1;
  // Zero out the unused tail of the table.
  for (int k = count; k < USHORT_RANGE; ++k)
    lut[k] = 0;
  return maxIndex; // maximum k where lut[k] is non-zero,
                   // i.e. number of ones in bitmap minus 1
}
void applyLut(const unsigned short lut[USHORT_RANGE],
              unsigned short data[/*nData*/], int nData) {
  // Replace each value in data[] with its mapping through lut[], in place.
  unsigned short *p = data;
  for (int remaining = nData; remaining > 0; --remaining, ++p) {
    *p = lut[*p];
  }
}
#if 0 // @todo
// NOTE: PIZ *compression* path, currently disabled and incomplete (the
// wavelet/Huffman stage below is itself inside a nested #if 0).
bool CompressPiz(unsigned char *outPtr, unsigned int &outSize) {
  unsigned char bitmap[BITMAP_SIZE];
  unsigned short minNonZero;
  unsigned short maxNonZero;
  if (IsBigEndian()) {
    // @todo { PIZ compression on BigEndian architecture. }
    assert(0);
    return false;
  }
  std::vector<unsigned short> tmpBuffer;
  int nData = tmpBuffer.size();
  bitmapFromData(&tmpBuffer.at(0), nData, bitmap, minNonZero, maxNonZero);
  unsigned short lut[USHORT_RANGE];
  //unsigned short maxValue = forwardLutFromBitmap(bitmap, lut);
  applyLut(lut, &tmpBuffer.at(0), nData);
  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  if (minNonZero <= maxNonZero) {
    memcpy(buf, (char *)&bitmap[0] + minNonZero, maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }
#if 0 // @todo
  //
  // Apply wavelet encoding
  //
  for (int i = 0; i < channels; ++i)
  {
    ChannelData &cd = _channelData[i];
    for (int j = 0; j < cd.size; ++j)
    {
      wav2Encode (cd.start + j,
                  cd.nx, cd.size,
                  cd.ny, cd.nx * cd.size,
                  maxValue);
    }
  }
  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int)); buf += sizeof(int);
  int length = hufCompress (_tmpBuffer, tmpBufferEnd - _tmpBuffer, buf);
  memcpy(lengthPtr, tmpBuffer, length);
  //Xdr::write <CharPtrIO> (lengthPtr, length);
  outPtr = _outBuffer;
  return buf - _outBuffer + length;
#endif
  assert(0);
  return true;
}
#endif
// Decompress one PIZ-compressed scanline block:
//   1. read the value bitmap and rebuild the reverse LUT,
//   2. Huffman-decode into tmpBuffer,
//   3. 2D wavelet-decode each channel in place,
//   4. expand values through the LUT and interleave scanlines into outPtr.
// Returns false on corrupt input or unsupported (big-endian) hosts.
bool DecompressPiz(unsigned char *outPtr, unsigned int &,
                   const unsigned char *inPtr, size_t tmpBufSize,
                   const std::vector<ChannelInfo> &channelInfo, int dataWidth,
                   int numLines) {
  unsigned char bitmap[BITMAP_SIZE];
  unsigned short minNonZero;
  unsigned short maxNonZero;
  if (IsBigEndian()) {
    // @todo { PIZ compression on BigEndian architecture. }
    assert(0);
    return false;
  }
  memset(bitmap, 0, BITMAP_SIZE);
  const unsigned char *ptr = inPtr;
  // NOTE(review): these unaligned reads assume a little-endian host that
  // tolerates misaligned 16-bit access; a memcpy would be strictly portable.
  minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;
  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }
  if (minNonZero <= maxNonZero) {
    memcpy((char *)&bitmap[0] + minNonZero, ptr, maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }
  unsigned short lut[USHORT_RANGE];
  memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap, lut);
  //
  // Huffman decoding
  //
  int length;
  length = *(reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);
  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  // FIX: the return value was previously ignored, so corrupt input left
  // tmpBuffer partially initialized and was still reported as success.
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer.at(0), tmpBufSize)) {
    return false;
  }
  //
  // Wavelet decoding
  //
  // Channels are laid out back-to-back in tmpBuffer, each occupying
  // nx * ny * size 16-bit words (size = 1 for HALF, 2 for UINT/FLOAT).
  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  for (size_t i = 0; i < channelInfo.size(); ++i) {
    const ChannelInfo &chan = channelInfo[i];
    int pixelSize = sizeof(int); // UINT and FLOAT
    if (chan.pixelType == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = dataWidth;
    channelData[i].ny = numLines;
    // channelData[i].ys = 1;
    channelData[i].size = pixelSize / sizeof(short);
    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Expand the pixel data to their original range
  //
  applyLut(lut, &tmpBuffer.at(0), tmpBufSize);
  // Interleave: one scanline at a time, channel-major within the line.
  // @todo { Xdr }
  for (int y = 0; y < numLines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      //   continue;
      int n = cd.nx * cd.size;
      memcpy(outPtr, cd.end, n * sizeof(unsigned short));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  return true;
}
//
// -----------------------------------------------------------------
//
} // namespace
// Load an EXR file and return its R/G/B/(A) channels as an interleaved
// RGBA float buffer (*out_rgba, malloc'd; caller frees).  HALF channels
// are promoted to FLOAT.  Returns 0 on success, negative on error, with
// a static message in *err when err is non-NULL.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  if (out_rgba == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return -1;
  }
  EXRImage exrImage;
  InitEXRImage(&exrImage);
  {
    int ret = ParseMultiChannelEXRHeaderFromFile(&exrImage, filename, err);
    if (ret != 0) {
      return ret;
    }
  }
  // Read HALF channel as FLOAT.
  for (int i = 0; i < exrImage.num_channels; i++) {
    if (exrImage.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exrImage.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }
  {
    int ret = LoadMultiChannelEXRFromFile(&exrImage, filename, err);
    if (ret != 0) {
      return ret;
    }
  }
  // Locate the R, G, B and (optional) A channels by name.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exrImage.num_channels; c++) {
    if (strcmp(exrImage.channel_names[c], "R") == 0) {
      idxR = c;
    } else if (strcmp(exrImage.channel_names[c], "G") == 0) {
      idxG = c;
    } else if (strcmp(exrImage.channel_names[c], "B") == 0) {
      idxB = c;
    } else if (strcmp(exrImage.channel_names[c], "A") == 0) {
      idxA = c;
    }
  }
  if (idxR == -1) {
    if (err) {
      (*err) = "R channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  if (idxG == -1) {
    if (err) {
      (*err) = "G channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  if (idxB == -1) {
    if (err) {
      (*err) = "B channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  (*out_rgba) =
      (float *)malloc(4 * sizeof(float) * exrImage.width * exrImage.height);
  // FIX: check the allocation before writing through it.
  if ((*out_rgba) == NULL) {
    if (err) {
      (*err) = "Memory allocation failed.\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  for (int i = 0; i < exrImage.width * exrImage.height; i++) {
    (*out_rgba)[4 * i + 0] =
        reinterpret_cast<float **>(exrImage.images)[idxR][i];
    (*out_rgba)[4 * i + 1] =
        reinterpret_cast<float **>(exrImage.images)[idxG][i];
    (*out_rgba)[4 * i + 2] =
        reinterpret_cast<float **>(exrImage.images)[idxB][i];
    // FIX: test presence with != -1.  EXR channels are sorted
    // alphabetically, so "A" is usually index 0 and the old `idxA > 0`
    // test silently discarded real alpha data.
    if (idxA != -1) {
      (*out_rgba)[4 * i + 3] =
          reinterpret_cast<float **>(exrImage.images)[idxA][i];
    } else {
      (*out_rgba)[4 * i + 3] = 1.0;
    }
  }
  (*width) = exrImage.width;
  (*height) = exrImage.height;
  // @todo { free exrImage }
  return 0;
}
// Parse an EXR header from memory: validate magic and version, then walk
// the attribute list, extracting dataWindow (-> *width/*height) and up to
// TINYEXR_MAX_ATTRIBUTES unrecognized attributes into customAttributes.
// Returns 0 on success, a negative code on malformed/unsupported input.
int ParseEXRHeaderFromMemory(EXRAttribute *customAttributes,
                             int *numCustomAttributes, int *width, int *height,
                             const unsigned char *memory) {
  if (memory == NULL || width == NULL || height == NULL) {
    // Invalid argument
    return -1;
  }
  const char *buf = reinterpret_cast<const char *>(memory);
  const char *marker = &buf[0];
  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      // if (err) {
      //  (*err) = "Header mismatch.";
      //}
      return -3;
    }
    marker += 4;
  }
  // Version, scanline.
  {
    // must be [2, 0, 0, 0]
    if (marker[0] != 2 || marker[1] != 0 || marker[2] != 0 || marker[3] != 0) {
      // if (err) {
      //  (*err) = "Unsupported version or scanline.";
      //}
      return -4;
    }
    marker += 4;
  }
  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int lineOrder = 0;                          // @fixme
  int displayWindow[4] = {-1, -1, -1, -1};    // @fixme
  float screenWindowCenter[2] = {0.0f, 0.0f}; // @fixme
  float screenWindowWidth = 1.0f;             // @fixme
  int numChannels = -1;
  float pixelAspectRatio = 1.0f; // @fixme
  std::vector<ChannelInfo> channels;
  std::vector<EXRAttribute> attribs;
  if (numCustomAttributes) {
    (*numCustomAttributes) = 0;
  }
  // Read attributes
  for (;;) {
    std::string attrName;
    std::string attrType;
    std::vector<unsigned char> data;
    const char *marker_next = ReadAttribute(attrName, attrType, data, marker);
    if (marker_next == NULL) {
      marker++; // skip '\0'
      break;
    }
    // FIX: std::string::compare() returns 0 on a match.  The original
    // compared against TINYEXR_COMPRESSIONTYPE_NONE, which only worked
    // because that constant happens to equal 0.
    if (attrName.compare("compression") == 0) {
      // mwkm
      // 0 : NO_COMPRESSION
      // 1 : RLE
      // 2 : ZIPS (Single scanline)
      // 3 : ZIP (16-line block)
      // 4 : PIZ (32-line block)
      // FIX: guard against an empty payload before reading data[0].
      if (data.empty() || data[0] > TINYEXR_COMPRESSIONTYPE_PIZ) {
        // if (err) {
        //  (*err) = "Unsupported compression type.";
        //}
        return -5;
      }
    } else if (attrName.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      ReadChannelInfo(channels, data);
      numChannels = channels.size();
      if (numChannels < 1) {
        // if (err) {
        //  (*err) = "Invalid channels format.";
        //}
        return -6;
      }
    } else if (attrName.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&dx));
        swap4(reinterpret_cast<unsigned int *>(&dy));
        swap4(reinterpret_cast<unsigned int *>(&dw));
        swap4(reinterpret_cast<unsigned int *>(&dh));
      }
    } else if (attrName.compare("displayWindow") == 0) {
      memcpy(&displayWindow[0], &data.at(0), sizeof(int));
      memcpy(&displayWindow[1], &data.at(4), sizeof(int));
      memcpy(&displayWindow[2], &data.at(8), sizeof(int));
      memcpy(&displayWindow[3], &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[0]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[1]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[2]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[3]));
      }
    } else if (attrName.compare("lineOrder") == 0) {
      // FIX: lineOrder is an int; copy sizeof(int) bytes, not
      // sizeof(float) (same size on common platforms, but wrong type).
      memcpy(&lineOrder, &data.at(0), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&lineOrder));
      }
    } else if (attrName.compare("pixelAspectRatio") == 0) {
      memcpy(&pixelAspectRatio, &data.at(0), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&pixelAspectRatio));
      }
    } else if (attrName.compare("screenWindowCenter") == 0) {
      memcpy(&screenWindowCenter[0], &data.at(0), sizeof(float));
      memcpy(&screenWindowCenter[1], &data.at(4), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[0]));
        swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[1]));
      }
    } else if (attrName.compare("screenWindowWidth") == 0) {
      memcpy(&screenWindowWidth, &data.at(0), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&screenWindowWidth));
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES)
      // FIX: bound the check on attribs.size(); the original tested
      // *numCustomAttributes, which is never incremented inside this
      // loop, so the limit was never enforced.
      if (numCustomAttributes &&
          ((int)attribs.size() < TINYEXR_MAX_ATTRIBUTES)) {
        EXRAttribute attrib;
        attrib.name = strdup(attrName.c_str());
        attrib.type = strdup(attrType.c_str());
        attrib.size = data.size();
        attrib.value = (unsigned char *)malloc(data.size());
        // Guard: data.at(0) on an empty vector would throw.
        if (!data.empty()) {
          memcpy((char *)attrib.value, &data.at(0), data.size());
        }
        attribs.push_back(attrib);
      }
    }
    marker = marker_next;
  }
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(numChannels >= 1);
  int dataWidth = dw - dx + 1;
  int dataHeight = dh - dy + 1;
  (*width) = dataWidth;
  (*height) = dataHeight;
  if (numCustomAttributes) {
    assert(attribs.size() <= TINYEXR_MAX_ATTRIBUTES);
    (*numCustomAttributes) = attribs.size();
    // Assume the pointer to customAttributes has enough memory to store.
    for (int i = 0; i < (int)attribs.size(); i++) {
      customAttributes[i] = attribs[i];
    }
  }
  return 0;
}
// Decode an EXR image already resident in memory into a caller-provided
// interleaved RGBA float buffer (out_rgba must be preallocated with
// width * height * 4 floats).  Returns 0 on success.
int LoadEXRFromMemory(float *out_rgba, const unsigned char *memory,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return -1;
  }
  EXRImage exrImage;
  InitEXRImage(&exrImage);
  int ret = LoadMultiChannelEXRFromMemory(&exrImage, memory, err);
  if (ret != 0) {
    return ret;
  }
  // Locate the R, G, B and (optional) A channels by name.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exrImage.num_channels; c++) {
    if (strcmp(exrImage.channel_names[c], "R") == 0) {
      idxR = c;
    } else if (strcmp(exrImage.channel_names[c], "G") == 0) {
      idxG = c;
    } else if (strcmp(exrImage.channel_names[c], "B") == 0) {
      idxB = c;
    } else if (strcmp(exrImage.channel_names[c], "A") == 0) {
      idxA = c;
    }
  }
  if (idxR == -1) {
    if (err) {
      (*err) = "R channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  if (idxG == -1) {
    if (err) {
      (*err) = "G channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  if (idxB == -1) {
    if (err) {
      (*err) = "B channel not found\n";
    }
    // @todo { free exrImage }
    return -1;
  }
  // Assume `out_rgba` have enough memory allocated.
  for (int i = 0; i < exrImage.width * exrImage.height; i++) {
    out_rgba[4 * i + 0] = reinterpret_cast<float **>(exrImage.images)[idxR][i];
    out_rgba[4 * i + 1] = reinterpret_cast<float **>(exrImage.images)[idxG][i];
    out_rgba[4 * i + 2] = reinterpret_cast<float **>(exrImage.images)[idxB][i];
    // FIX: test presence with != -1.  EXR channels are sorted
    // alphabetically, so "A" is usually index 0 and the old `idxA > 0`
    // test silently discarded real alpha data.
    if (idxA != -1) {
      out_rgba[4 * i + 3] =
          reinterpret_cast<float **>(exrImage.images)[idxA][i];
    } else {
      out_rgba[4 * i + 3] = 1.0;
    }
  }
  return 0;
}
// Read an entire EXR file into memory and decode it via
// LoadMultiChannelEXRFromMemory.  Returns 0 on success, -1 on I/O or
// argument errors (message in *err when err is non-NULL).
int LoadMultiChannelEXRFromFile(EXRImage *exrImage, const char *filename,
                                const char **err) {
  if (exrImage == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }
  // Compute size
  fseek(fp, 0, SEEK_END);
  long sz = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  // FIX: ftell() can return -1 (or 0 for an empty file); the original
  // stored it unchecked into a size_t, producing a huge allocation.
  if (sz < 1) {
    fclose(fp);
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }
  size_t filesize = (size_t)sz;
  std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  size_t ret = fread(&buf[0], 1, filesize, fp);
  fclose(fp);
  // FIX: a short read is a hard error, not just an assert (which
  // disappears in release builds).
  if (ret != filesize) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }
  return LoadMultiChannelEXRFromMemory(exrImage, &buf.at(0), err);
}
int LoadMultiChannelEXRFromMemory(EXRImage *exrImage,
const unsigned char *memory,
const char **err) {
if (exrImage == NULL || memory == NULL) {
if (err) {
(*err) = "Invalid argument.";
}
return -1;
}
const char *buf = reinterpret_cast<const char *>(memory);
const char *head = &buf[0];
const char *marker = &buf[0];
// Header check.
{
const char header[] = {0x76, 0x2f, 0x31, 0x01};
if (memcmp(marker, header, 4) != 0) {
if (err) {
(*err) = "Header mismatch.";
}
return -3;
}
marker += 4;
}
// Version, scanline.
{
// must be [2, 0, 0, 0]
if (marker[0] != 2 || marker[1] != 0 || marker[2] != 0 || marker[3] != 0) {
if (err) {
(*err) = "Unsupported version or scanline.";
}
return -4;
}
marker += 4;
}
int dx = -1;
int dy = -1;
int dw = -1;
int dh = -1;
int numScanlineBlocks = 1; // 16 for ZIP compression.
int compressionType = -1;
int numChannels = -1;
unsigned char lineOrder = 0; // 0 -> increasing y; 1 -> decreasing
std::vector<ChannelInfo> channels;
// Read attributes
for (;;) {
std::string attrName;
std::string attrType;
std::vector<unsigned char> data;
const char *marker_next = ReadAttribute(attrName, attrType, data, marker);
if (marker_next == NULL) {
marker++; // skip '\0'
break;
}
if (attrName.compare("compression") == 0) {
// mwkm
// 0 : NO_COMPRESSION
// 1 : RLE
// 2 : ZIPS (Single scanline)
// 3 : ZIP (16-line block)
// 4 : PIZ (32-line block)
if (data[0] != TINYEXR_COMPRESSIONTYPE_NONE &&
data[0] != TINYEXR_COMPRESSIONTYPE_ZIPS &&
data[0] != TINYEXR_COMPRESSIONTYPE_ZIP &&
data[0] != TINYEXR_COMPRESSIONTYPE_PIZ) {
if (err) {
(*err) = "Unsupported compression type.";
}
return -5;
}
compressionType = data[0];
if (compressionType == TINYEXR_COMPRESSIONTYPE_ZIP) {
numScanlineBlocks = 16;
} else if (compressionType == TINYEXR_COMPRESSIONTYPE_PIZ) {
numScanlineBlocks = 32;
}
} else if (attrName.compare("channels") == 0) {
// name: zero-terminated string, from 1 to 255 bytes long
// pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
// pLinear: unsigned char, possible values are 0 and 1
// reserved: three chars, should be zero
// xSampling: int
// ySampling: int
ReadChannelInfo(channels, data);
numChannels = channels.size();
if (numChannels < 1) {
if (err) {
(*err) = "Invalid channels format.";
}
return -6;
}
} else if (attrName.compare("dataWindow") == 0) {
memcpy(&dx, &data.at(0), sizeof(int));
memcpy(&dy, &data.at(4), sizeof(int));
memcpy(&dw, &data.at(8), sizeof(int));
memcpy(&dh, &data.at(12), sizeof(int));
if (IsBigEndian()) {
swap4(reinterpret_cast<unsigned int *>(&dx));
swap4(reinterpret_cast<unsigned int *>(&dy));
swap4(reinterpret_cast<unsigned int *>(&dw));
swap4(reinterpret_cast<unsigned int *>(&dh));
}
} else if (attrName.compare("displayWindow") == 0) {
int x, y, w, h;
memcpy(&x, &data.at(0), sizeof(int));
memcpy(&y, &data.at(4), sizeof(int));
memcpy(&w, &data.at(8), sizeof(int));
memcpy(&h, &data.at(12), sizeof(int));
if (IsBigEndian()) {
swap4(reinterpret_cast<unsigned int *>(&x));
swap4(reinterpret_cast<unsigned int *>(&y));
swap4(reinterpret_cast<unsigned int *>(&w));
swap4(reinterpret_cast<unsigned int *>(&h));
}
} else if (attrName.compare("lineOrder") == 0) {
memcpy(&lineOrder, &data.at(0), sizeof(lineOrder));
}
marker = marker_next;
}
assert(dx >= 0);
assert(dy >= 0);
assert(dw >= 0);
assert(dh >= 0);
assert(numChannels >= 1);
int dataWidth = dw - dx + 1;
int dataHeight = dh - dy + 1;
// Read offset tables.
int numBlocks = dataHeight / numScanlineBlocks;
if (numBlocks * numScanlineBlocks < dataHeight) {
numBlocks++;
}
std::vector<long long> offsets(numBlocks);
for (int y = 0; y < numBlocks; y++) {
long long offset;
memcpy(&offset, marker, sizeof(long long));
if (IsBigEndian()) {
swap8(reinterpret_cast<unsigned long long *>(&offset));
}
marker += sizeof(long long); // = 8
offsets[y] = offset;
}
exrImage->images = reinterpret_cast<unsigned char **>(
(float **)malloc(sizeof(float *) * numChannels));
std::vector<size_t> channelOffsetList(numChannels);
int pixelDataSize = 0;
size_t channelOffset = 0;
for (int c = 0; c < numChannels; c++) {
channelOffsetList[c] = channelOffset;
if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) {
pixelDataSize += sizeof(unsigned short);
channelOffset += sizeof(unsigned short);
// Alloc internal image for half type.
if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
exrImage->images[c] =
reinterpret_cast<unsigned char *>((unsigned short *)malloc(
sizeof(unsigned short) * dataWidth * dataHeight));
} else if (exrImage->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
exrImage->images[c] = reinterpret_cast<unsigned char *>(
(float *)malloc(sizeof(float) * dataWidth * dataHeight));
} else {
assert(0);
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) {
pixelDataSize += sizeof(float);
channelOffset += sizeof(float);
exrImage->images[c] = reinterpret_cast<unsigned char *>(
(float *)malloc(sizeof(float) * dataWidth * dataHeight));
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) {
pixelDataSize += sizeof(unsigned int);
channelOffset += sizeof(unsigned int);
exrImage->images[c] = reinterpret_cast<unsigned char *>((
unsigned int *)malloc(sizeof(unsigned int) * dataWidth * dataHeight));
} else {
assert(0);
}
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < numBlocks; y++) {
const unsigned char *dataPtr =
reinterpret_cast<const unsigned char *>(head + offsets[y]);
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
int lineNo;
memcpy(&lineNo, dataPtr, sizeof(int));
int dataLen;
memcpy(&dataLen, dataPtr + 4, sizeof(int));
if (IsBigEndian()) {
swap4(reinterpret_cast<unsigned int *>(&lineNo));
swap4(reinterpret_cast<unsigned int *>(&dataLen));
}
int endLineNo = (std::min)(lineNo + numScanlineBlocks, dataHeight);
int numLines = endLineNo - lineNo;
if (compressionType == 4) { // PIZ
// Allocate original data size.
std::vector<unsigned char> outBuf(dataWidth * numLines * pixelDataSize);
unsigned int dstLen;
size_t tmpBufLen = dataWidth * numLines * pixelDataSize;
DecompressPiz(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen,
dataPtr + 8, tmpBufLen, channels, dataWidth, numLines);
bool isBigEndian = IsBigEndian();
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (int c = 0; c < numChannels; c++) {
if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) {
for (int v = 0; v < numLines; v++) {
const unsigned short *linePtr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
FP16 hf;
hf.u = linePtr[u];
if (isBigEndian) {
swap2(reinterpret_cast<unsigned short *>(&hf.u));
}
if (exrImage->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) {
assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (int v = 0; v < numLines; v++) {
const unsigned int *linePtr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
unsigned int val = linePtr[u];
if (isBigEndian) {
swap4(&val);
}
unsigned int *image =
reinterpret_cast<unsigned int **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = val;
}
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) {
assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (int v = 0; v < numLines; v++) {
const float *linePtr = reinterpret_cast<float *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
float val = linePtr[u];
if (isBigEndian) {
swap4(reinterpret_cast<unsigned int *>(&val));
}
float *image = reinterpret_cast<float **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = val;
}
}
} else {
assert(0);
}
}
// mwkm, ZIPS or ZIP both good to go
} else if (compressionType == 2 || compressionType == 3) { // ZIP
// Allocate original data size.
std::vector<unsigned char> outBuf(dataWidth * numLines * pixelDataSize);
unsigned long dstLen = outBuf.size();
DecompressZip(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen,
dataPtr + 8, dataLen);
bool isBigEndian = IsBigEndian();
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (int c = 0; c < numChannels; c++) {
if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) {
for (int v = 0; v < numLines; v++) {
const unsigned short *linePtr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
FP16 hf;
hf.u = linePtr[u];
if (isBigEndian) {
swap2(reinterpret_cast<unsigned short *>(&hf.u));
}
if (exrImage->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = hf.u;
} else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = f32.f;
}
}
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) {
assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (int v = 0; v < numLines; v++) {
const unsigned int *linePtr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
unsigned int val = linePtr[u];
if (isBigEndian) {
swap4(&val);
}
unsigned int *image =
reinterpret_cast<unsigned int **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = val;
}
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) {
assert(exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (int v = 0; v < numLines; v++) {
const float *linePtr = reinterpret_cast<float *>(
&outBuf.at(v * pixelDataSize * dataWidth +
channelOffsetList[c] * dataWidth));
for (int u = 0; u < dataWidth; u++) {
float val = linePtr[u];
if (isBigEndian) {
swap4(reinterpret_cast<unsigned int *>(&val));
}
float *image = reinterpret_cast<float **>(exrImage->images)[c];
if (lineOrder == 0) {
image += (lineNo + v) * dataWidth + u;
} else {
image += (dataHeight - 1 - (lineNo + v)) * dataWidth + u;
}
*image = val;
}
}
} else {
assert(0);
}
}
} else if (compressionType == 0) { // No compression
bool isBigEndian = IsBigEndian();
for (int c = 0; c < numChannels; c++) {
if (channels[c].pixelType == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *linePtr =
reinterpret_cast<const unsigned short *>(
dataPtr + 8 + c * dataWidth * sizeof(unsigned short));
if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(exrImage->images[c]);
if (lineOrder == 0) {
outLine += y * dataWidth;
} else {
outLine += (dataHeight - 1 - y) * dataWidth;
}
for (int u = 0; u < dataWidth; u++) {
FP16 hf;
hf.u = linePtr[u];
if (isBigEndian) {
swap2(reinterpret_cast<unsigned short *>(&hf.u));
}
outLine[u] = hf.u;
}
} else if (exrImage->requested_pixel_types[c] ==
TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(exrImage->images[c]);
if (lineOrder == 0) {
outLine += y * dataWidth;
} else {
outLine += (dataHeight - 1 - y) * dataWidth;
}
for (int u = 0; u < dataWidth; u++) {
FP16 hf;
hf.u = linePtr[u];
if (isBigEndian) {
swap2(reinterpret_cast<unsigned short *>(&hf.u));
}
FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
} else {
assert(0);
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_FLOAT) {
const float *linePtr = reinterpret_cast<const float *>(
dataPtr + 8 + c * dataWidth * sizeof(float));
float *outLine = reinterpret_cast<float *>(exrImage->images[c]);
if (lineOrder == 0) {
outLine += y * dataWidth;
} else {
outLine += (dataHeight - 1 - y) * dataWidth;
}
for (int u = 0; u < dataWidth; u++) {
float val = linePtr[u];
if (isBigEndian) {
swap4(reinterpret_cast<unsigned int *>(&val));
}
outLine[u] = val;
}
} else if (channels[c].pixelType == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *linePtr = reinterpret_cast<const unsigned int *>(
dataPtr + 8 + c * dataWidth * sizeof(unsigned int));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(exrImage->images[c]);
if (lineOrder == 0) {
outLine += y * dataWidth;
} else {
outLine += (dataHeight - 1 - y) * dataWidth;
}
for (int u = 0; u < dataWidth; u++) {
unsigned int val = linePtr[u];
if (isBigEndian) {
swap4(reinterpret_cast<unsigned int *>(&val));
}
outLine[u] = val;
}
}
}
}
} // omp parallel
{
exrImage->channel_names =
(const char **)malloc(sizeof(const char *) * numChannels);
for (int c = 0; c < numChannels; c++) {
#ifdef _WIN32
exrImage->channel_names[c] = _strdup(channels[c].name.c_str());
#else
exrImage->channel_names[c] = strdup(channels[c].name.c_str());
#endif
}
exrImage->num_channels = numChannels;
exrImage->width = dataWidth;
exrImage->height = dataHeight;
// Fill with requested_pixel_types.
exrImage->pixel_types = (int *)malloc(sizeof(int *) * numChannels);
for (int c = 0; c < numChannels; c++) {
exrImage->pixel_types[c] = exrImage->requested_pixel_types[c];
}
}
return 0; // OK
}
// @deprecated
// NOTE(review): this entire function is compiled out (#if 0). It is an older
// RGBA-only writer (HALF pixel type, ZIP compression, hard-coded A/B/G/R
// channel list) kept for reference. The supported writer path is
// SaveMultiChannelEXRToMemory()/SaveMultiChannelEXRToFile().
#if 0
// Write `in_rgba` (width * height interleaved RGBA float pixels) to
// `filename` as a scanline EXR with HALF pixels and ZIP compression.
// Returns 0 on success, -1 on error (with *err set when non-NULL).
int SaveEXR(const float *in_rgba, int width, int height, const char *filename,
            const char **err) {
  if (in_rgba == NULL || filename == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }

  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot write a file.";
    }
    return -1;
  }

  // Header (EXR magic number).
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    size_t n = fwrite(header, 1, 4, fp);
    assert(n == 4);
  }

  // Version, scanline.
  {
    const char marker[] = {2, 0, 0, 0};
    size_t n = fwrite(marker, 1, 4, fp);
    assert(n == 4);
  }

  int numScanlineBlocks = 16; // 16 for ZIP compression.

  // Write attributes.
  {
    // Raw chlist payload: channels "A", "B", "G", "R", each HALF (pixel
    // type 1), sampling 1x1. 18 bytes per channel entry.
    unsigned char data[] = {
        'A', 0,   1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 'B',
        0,   1,   0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 'G', 0,
        1,   0,   0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 'R', 0,  1,
        0,   0,   0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0}; // last 0 =
                                                             // terminator.
    WriteAttribute(fp, "channels", "chlist", data, 18 * 4 + 1); // +1 = null
  }

  {
    int compressionType = 3; // ZIP compression
    WriteAttribute(fp, "compression", "compression",
                   reinterpret_cast<const unsigned char *>(&compressionType),
                   1);
  }

  {
    // dataWindow == displayWindow == [0,0]-[w-1,h-1].
    int data[4] = {0, 0, width - 1, height - 1};
    WriteAttribute(fp, "dataWindow", "box2i",
                   reinterpret_cast<const unsigned char *>(data),
                   sizeof(int) * 4);
    WriteAttribute(fp, "displayWindow", "box2i",
                   reinterpret_cast<const unsigned char *>(data),
                   sizeof(int) * 4);
  }

  {
    unsigned char lineOrder = 0; // increasingY
    WriteAttribute(fp, "lineOrder", "lineOrder", &lineOrder, 1);
  }

  {
    float aspectRatio = 1.0f;
    WriteAttribute(fp, "pixelAspectRatio", "float",
                   reinterpret_cast<const unsigned char *>(&aspectRatio),
                   sizeof(float));
  }

  {
    float center[2] = {0.0f, 0.0f};
    WriteAttribute(fp, "screenWindowCenter", "v2f",
                   reinterpret_cast<const unsigned char *>(center),
                   2 * sizeof(float));
  }

  {
    float w = (float)width;
    WriteAttribute(fp, "screenWindowWidth", "float",
                   reinterpret_cast<const unsigned char *>(&w), sizeof(float));
  }

  { // end of header
    unsigned char e = 0;
    fwrite(&e, 1, 1, fp);
  }

  int numBlocks = height / numScanlineBlocks;
  if (numBlocks * numScanlineBlocks < height) {
    numBlocks++; // partial last block
  }

  std::vector<long long> offsets(numBlocks);

  size_t headerSize = ftell(fp); // sizeof(header)
  long long offset =
      headerSize +
      numBlocks * sizeof(long long); // sizeof(header) + sizeof(offsetTable)

  std::vector<unsigned char> data;

  for (int i = 0; i < numBlocks; i++) {
    int startY = numScanlineBlocks * i;
    int endY = (std::min)(numScanlineBlocks * (i + 1), height);
    int h = endY - startY;

    std::vector<unsigned short> buf(4 * width * h);

    for (int y = 0; y < h; y++) {
      for (int x = 0; x < width; x++) {
        FP32 r, g, b, a;
        r.f = in_rgba[4 * ((y + startY) * width + x) + 0];
        g.f = in_rgba[4 * ((y + startY) * width + x) + 1];
        b.f = in_rgba[4 * ((y + startY) * width + x) + 2];
        a.f = in_rgba[4 * ((y + startY) * width + x) + 3];

        FP16 hr, hg, hb, ha;
        hr = float_to_half_full(r);
        hg = float_to_half_full(g);
        hb = float_to_half_full(b);
        ha = float_to_half_full(a);

        // Assume increasing Y.
        // Scanlines are stored planar per channel, in chlist (alphabetical)
        // order: A, B, G, R — hence alpha at offset 0 and red at 3*width.
        buf[4 * y * width + 3 * width + x] = hr.u;
        buf[4 * y * width + 2 * width + x] = hg.u;
        buf[4 * y * width + 1 * width + x] = hb.u;
        buf[4 * y * width + 0 * width + x] = ha.u;
      }
    }

    // NOTE(review): `bound` is unused; the block vector below is sized with
    // the same expression.
    int bound = miniz::mz_compressBound(buf.size() * sizeof(unsigned short));

    std::vector<unsigned char> block(
        miniz::mz_compressBound(buf.size() * sizeof(unsigned short)));
    unsigned long long outSize = block.size();

    CompressZip(&block.at(0), outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size() * sizeof(unsigned short));

    // Chunk layout:
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    std::vector<unsigned char> header(8);
    unsigned int dataLen = outSize; // truncate
    memcpy(&header.at(0), &startY, sizeof(int));
    memcpy(&header.at(4), &dataLen, sizeof(unsigned int));

    data.insert(data.end(), header.begin(), header.end());
    data.insert(data.end(), block.begin(), block.begin() + dataLen);

    offsets[i] = offset;
    offset += dataLen + 8; // 8 = sizeof(blockHeader)
  }

  fwrite(&offsets.at(0), 1, sizeof(unsigned long long) * numBlocks, fp);

  fwrite(&data.at(0), 1, data.size(), fp);

  fclose(fp);

  return 0; // OK
}
#endif
// Serialize `exrImage` as a scanline OpenEXR (version 2) byte stream into a
// freshly malloc()ed buffer. On success, *memory_out receives the buffer
// (the caller owns it and must free() it) and the byte size is returned;
// on error, 0 is returned and *err (when non-NULL) points to a static
// message. NONE/ZIPS/ZIP compression are implemented; PIZ passes the
// argument check but hits assert(0) below (not implemented).
size_t SaveMultiChannelEXRToMemory(const EXRImage *exrImage,
                                   unsigned char **memory_out,
                                   const char **err) {
  if (exrImage == NULL || memory_out == NULL || exrImage->compression < 0 ||
      exrImage->compression > TINYEXR_COMPRESSIONTYPE_PIZ) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return 0;
  }

  std::vector<unsigned char> memory;

  // Header (EXR magic number).
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    memory.insert(memory.end(), header, header + 4);
  }

  // Version 2, scanline format (no tile/deep flags).
  {
    const char marker[] = {2, 0, 0, 0};
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Scanlines per chunk: 1 for NONE/ZIPS, 16 for ZIP, 32 for PIZ.
  int numScanlineBlocks = 1;
  if (exrImage->compression == TINYEXR_COMPRESSIONTYPE_ZIP) {
    numScanlineBlocks = 16;
  } else if (exrImage->compression == TINYEXR_COMPRESSIONTYPE_PIZ) {
    numScanlineBlocks = 32;
  }

  // Write attributes.
  {
    // "channels" attribute, using the *requested* (on-disk) pixel types.
    std::vector<unsigned char> data;
    std::vector<ChannelInfo> channels;
    for (int c = 0; c < exrImage->num_channels; c++) {
      ChannelInfo info;
      info.pLinear = 0;
      info.pixelType = exrImage->requested_pixel_types[c];
      info.xSampling = 1;
      info.ySampling = 1;
      info.name = std::string(exrImage->channel_names[c]);
      channels.push_back(info);
    }

    WriteChannelInfo(data, channels);

    WriteAttributeToMemory(memory, "channels", "chlist", &data.at(0),
                           data.size()); // +1 = null
  }

  {
    int comp = exrImage->compression;
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&comp));
    }
    // Only the first byte of the (byte-swapped on BE) int is stored; the
    // compression attribute is a single byte.
    WriteAttributeToMemory(memory, "compression", "compression",
                           reinterpret_cast<const unsigned char *>(&comp), 1);
  }

  {
    // dataWindow == displayWindow == [0,0]-[w-1,h-1].
    int data[4] = {0, 0, exrImage->width - 1, exrImage->height - 1};
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&data[0]));
      swap4(reinterpret_cast<unsigned int *>(&data[1]));
      swap4(reinterpret_cast<unsigned int *>(&data[2]));
      swap4(reinterpret_cast<unsigned int *>(&data[3]));
    }
    WriteAttributeToMemory(memory, "dataWindow", "box2i",
                           reinterpret_cast<const unsigned char *>(data),
                           sizeof(int) * 4);
    WriteAttributeToMemory(memory, "displayWindow", "box2i",
                           reinterpret_cast<const unsigned char *>(data),
                           sizeof(int) * 4);
  }

  {
    unsigned char lineOrder = 0; // increasingY
    WriteAttributeToMemory(memory, "lineOrder", "lineOrder", &lineOrder, 1);
  }

  {
    float aspectRatio = 1.0f;
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
    }
    WriteAttributeToMemory(
        memory, "pixelAspectRatio", "float",
        reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
  }

  {
    float center[2] = {0.0f, 0.0f};
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&center[0]));
      swap4(reinterpret_cast<unsigned int *>(&center[1]));
    }
    WriteAttributeToMemory(memory, "screenWindowCenter", "v2f",
                           reinterpret_cast<const unsigned char *>(center),
                           2 * sizeof(float));
  }

  {
    float w = (float)exrImage->width;
    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&w));
    }
    WriteAttributeToMemory(memory, "screenWindowWidth", "float",
                           reinterpret_cast<const unsigned char *>(&w),
                           sizeof(float));
  }

  // Custom attributes
  if (exrImage->num_custom_attributes > 0) {
    // @todo { endian }
    for (int i = 0; i < exrImage->num_custom_attributes; i++) {
      // NOTE(review): passes &value — the address of the `value` member
      // itself. Assumes the payload is stored inline in the attribute
      // struct; confirm against the EXRAttribute declaration.
      WriteAttributeToMemory(memory, exrImage->custom_attributes[i].name,
                             exrImage->custom_attributes[i].type,
                             reinterpret_cast<const unsigned char *>(
                                 &exrImage->custom_attributes[i].value),
                             exrImage->custom_attributes[i].size);
    }
  }

  { // end of header
    unsigned char e = 0;
    memory.push_back(e);
  }

  int numBlocks = exrImage->height / numScanlineBlocks;
  if (numBlocks * numScanlineBlocks < exrImage->height) {
    numBlocks++; // partial last block
  }
  std::vector<long long> offsets(numBlocks);

  size_t headerSize = memory.size();
  // First chunk starts right after the header and the offset table.
  long long offset =
      headerSize +
      numBlocks * sizeof(long long); // sizeof(header) + sizeof(offsetTable)

  std::vector<unsigned char> data;

  bool isBigEndian = IsBigEndian();

  // One encoded chunk per block, filled (possibly in parallel via OpenMP)
  // and concatenated in block order afterwards.
  std::vector<std::vector<unsigned char> > dataList(numBlocks);
  std::vector<size_t> channelOffsetList(exrImage->num_channels);

  // Per-pixel byte size across all channels, and each channel's byte
  // offset within a scanline group — both based on the on-disk types.
  int pixelDataSize = 0;
  size_t channelOffset = 0;
  for (int c = 0; c < exrImage->num_channels; c++) {
    channelOffsetList[c] = channelOffset;
    if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
      pixelDataSize += sizeof(unsigned short);
      channelOffset += sizeof(unsigned short);
    } else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
      pixelDataSize += sizeof(float);
      channelOffset += sizeof(float);
    } else if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
      pixelDataSize += sizeof(unsigned int);
      channelOffset += sizeof(unsigned int);
    } else {
      assert(0);
    }
  }

#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < numBlocks; i++) {
    int startY = numScanlineBlocks * i;
    int endY = (std::min)(numScanlineBlocks * (i + 1), exrImage->height);
    int h = endY - startY;

    // Uncompressed chunk payload: for each scanline, each channel's row of
    // samples stored contiguously (planar per line).
    std::vector<unsigned char> buf(exrImage->width * h * pixelDataSize);

    for (int c = 0; c < exrImage->num_channels; c++) {
      // pixel_types[c] is the in-memory type of exrImage->images[c];
      // requested_pixel_types[c] is the type written to the file.
      if (exrImage->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
          // HALF in memory -> FLOAT on disk.
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exrImage->width; x++) {
              FP16 h16;
              h16.u = reinterpret_cast<unsigned short **>(
                  exrImage->images)[c][(y + startY) * exrImage->width + x];

              FP32 f32 = half_to_float(h16);

              if (isBigEndian) {
                swap4(reinterpret_cast<unsigned int *>(&f32.f));
              }

              // Assume increasing Y
              float *linePtr = reinterpret_cast<float *>(
                  &buf.at(pixelDataSize * y * exrImage->width +
                          channelOffsetList[c] * exrImage->width));
              linePtr[x] = f32.f;
            }
          }
        } else if (exrImage->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_HALF) {
          // HALF -> HALF, straight copy (byte-swapped on BE hosts).
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exrImage->width; x++) {
              unsigned short val = reinterpret_cast<unsigned short **>(
                  exrImage->images)[c][(y + startY) * exrImage->width + x];

              if (isBigEndian) {
                swap2(&val);
              }

              // Assume increasing Y
              unsigned short *linePtr = reinterpret_cast<unsigned short *>(
                  &buf.at(pixelDataSize * y * exrImage->width +
                          channelOffsetList[c] * exrImage->width));
              linePtr[x] = val;
            }
          }
        } else {
          assert(0); // HALF -> UINT conversion is not supported.
        }
      } else if (exrImage->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        if (exrImage->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
          // FLOAT in memory -> HALF on disk.
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exrImage->width; x++) {
              FP32 f32;
              f32.f = reinterpret_cast<float **>(
                  exrImage->images)[c][(y + startY) * exrImage->width + x];

              FP16 h16;
              h16 = float_to_half_full(f32);

              if (isBigEndian) {
                swap2(reinterpret_cast<unsigned short *>(&h16.u));
              }

              // Assume increasing Y
              unsigned short *linePtr = reinterpret_cast<unsigned short *>(
                  &buf.at(pixelDataSize * y * exrImage->width +
                          channelOffsetList[c] * exrImage->width));
              linePtr[x] = h16.u;
            }
          }
        } else if (exrImage->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_FLOAT) {
          // FLOAT -> FLOAT, straight copy (byte-swapped on BE hosts).
          for (int y = 0; y < h; y++) {
            for (int x = 0; x < exrImage->width; x++) {
              float val = reinterpret_cast<float **>(
                  exrImage->images)[c][(y + startY) * exrImage->width + x];

              if (isBigEndian) {
                swap4(reinterpret_cast<unsigned int *>(&val));
              }

              // Assume increasing Y
              float *linePtr = reinterpret_cast<float *>(
                  &buf.at(pixelDataSize * y * exrImage->width +
                          channelOffsetList[c] * exrImage->width));
              linePtr[x] = val;
            }
          }
        } else {
          assert(0); // FLOAT -> UINT conversion is not supported.
        }
      } else if (exrImage->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
        // UINT -> UINT only; no conversion branch exists for UINT input.
        for (int y = 0; y < h; y++) {
          for (int x = 0; x < exrImage->width; x++) {
            unsigned int val = reinterpret_cast<unsigned int **>(
                exrImage->images)[c][(y + startY) * exrImage->width + x];

            if (isBigEndian) {
              swap4(&val);
            }

            // Assume increasing Y
            unsigned int *linePtr = reinterpret_cast<unsigned int *>(
                &buf.at(pixelDataSize * y * exrImage->width +
                        channelOffsetList[c] * exrImage->width));
            linePtr[x] = val;
          }
        }
      }
    }

    if (exrImage->compression == TINYEXR_COMPRESSIONTYPE_NONE) {
      // Chunk layout:
      // 4 byte: scan line
      // 4 byte: data size
      // ~ : pixel data(uncompressed)
      std::vector<unsigned char> header(8);
      unsigned int dataLen = (unsigned int)buf.size();
      memcpy(&header.at(0), &startY, sizeof(int));
      memcpy(&header.at(4), &dataLen, sizeof(unsigned int));

      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
        swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
      }

      dataList[i].insert(dataList[i].end(), header.begin(), header.end());
      dataList[i].insert(dataList[i].end(), buf.begin(), buf.begin() + dataLen);

    } else if ((exrImage->compression == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
               (exrImage->compression == TINYEXR_COMPRESSIONTYPE_ZIP)) {
      std::vector<unsigned char> block(miniz::mz_compressBound(buf.size()));
      unsigned long long outSize = block.size();

      CompressZip(&block.at(0), outSize,
                  reinterpret_cast<const unsigned char *>(&buf.at(0)),
                  buf.size());

      // Chunk layout:
      // 4 byte: scan line
      // 4 byte: data size
      // ~ : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int dataLen = outSize; // truncate
      memcpy(&header.at(0), &startY, sizeof(int));
      memcpy(&header.at(4), &dataLen, sizeof(unsigned int));

      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
        swap4(reinterpret_cast<unsigned int *>(&header.at(4)));
      }

      dataList[i].insert(dataList[i].end(), header.begin(), header.end());
      dataList[i].insert(dataList[i].end(), block.begin(),
                         block.begin() + dataLen);

    } else if (exrImage->compression == TINYEXR_COMPRESSIONTYPE_PIZ) {
      // @todo PIZ compression is not implemented.
      assert(0);
    } else {
      assert(0);
    }
  } // omp parallel

  // Concatenate chunks in block order and build the offset table; `offset`
  // tracks the running file position, kept un-swapped for arithmetic.
  for (int i = 0; i < numBlocks; i++) {
    data.insert(data.end(), dataList[i].begin(), dataList[i].end());

    offsets[i] = offset;
    if (IsBigEndian()) {
      swap8(reinterpret_cast<unsigned long long *>(&offsets[i]));
    }
    offset += dataList[i].size();
  }

  // Emit the offset table, then the chunk data.
  {
    memory.insert(memory.end(),
                  reinterpret_cast<unsigned char *>(&offsets.at(0)),
                  reinterpret_cast<unsigned char *>(&offsets.at(0)) +
                      sizeof(unsigned long long) * numBlocks);
  }

  { memory.insert(memory.end(), data.begin(), data.end()); }

  assert(memory.size() > 0);

  // Hand the serialized bytes to the caller (ownership transfers; caller
  // must free()).
  (*memory_out) = (unsigned char *)malloc(memory.size());
  memcpy((*memory_out), &memory.at(0), memory.size());

  return memory.size(); // OK
}
// Write `exrImage` to `filename` as a multi-channel scanline EXR file.
// Serializes via SaveMultiChannelEXRToMemory() and writes the result.
// Returns 0 on success, -1 on error (with *err set when non-NULL).
//
// Fixes vs. previous version: serialization failure, short writes, and
// fclose() failure are now reported as -1 instead of silently returning 0;
// the output file is no longer created/truncated before serialization has
// succeeded.
int SaveMultiChannelEXRToFile(const EXRImage *exrImage, const char *filename,
                              const char **err) {
  if (exrImage == NULL || filename == NULL || exrImage->compression < 0 ||
      exrImage->compression > TINYEXR_COMPRESSIONTYPE_PIZ) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }

  // Serialize first so a failed save never leaves an empty file behind.
  unsigned char *mem = NULL;
  size_t mem_size = SaveMultiChannelEXRToMemory(exrImage, &mem, err);
  if ((mem_size == 0) || (mem == NULL)) {
    // *err was already set by SaveMultiChannelEXRToMemory on failure.
    free(mem);
    return -1;
  }

  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot write a file.";
    }
    free(mem);
    return -1;
  }

  size_t written = fwrite(mem, 1, mem_size, fp);
  free(mem);

  if (written != mem_size) {
    // Short write (e.g. disk full).
    fclose(fp);
    if (err) {
      (*err) = "Cannot write a file.";
    }
    return -1;
  }

  // fclose() flushes buffered data; a failure here means the file is
  // incomplete.
  if (fclose(fp) != 0) {
    if (err) {
      (*err) = "Cannot write a file.";
    }
    return -1;
  }

  return 0; // OK
}
// Load a deep scanline EXR file (version 2, deep-data flag set) from
// `filename` into `deepImage`. Samples are decoded to float regardless of
// their stored type (UINT/HALF/FLOAT); per-pixel sample counts go into
// deepImage->offset_table. Returns 0 on success, a negative error code on
// failure (*err set when non-NULL).
int LoadDeepEXR(DeepImage *deepImage, const char *filename, const char **err) {
  if (deepImage == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }

  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = ftell(fp);
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    fclose(fp);
    if (err) {
      (*err) = "File size is zero.";
    }
    return -1;
  }

  // Slurp the whole file into memory; all parsing below is pointer-based.
  std::vector<char> buf(filesize); // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];   // file start; chunk offsets are relative to it
  const char *marker = &buf[0]; // read cursor

  // Header check (EXR magic number).
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      if (err) {
        (*err) = "Header mismatch.";
      }
      return -3;
    }
    marker += 4;
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 8, 0, 0] — byte 1 carries the deep-data flag (0x800 >> 8).
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      if (err) {
        (*err) = "Unsupported version or scanline.";
      }
      return -4;
    }
    marker += 4;
  }

  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int numScanlineBlocks = 1; // 16 for ZIP compression.
  int compressionType = -1;
  int numChannels = -1;
  std::vector<ChannelInfo> channels;

  // Read attributes until the empty-name terminator.
  for (;;) {
    std::string attrName;
    std::string attrType;
    std::vector<unsigned char> data;
    const char *marker_next = ReadAttribute(attrName, attrType, data, marker);
    if (marker_next == NULL) {
      marker++; // skip '\0'
      break;
    }

    if (attrName.compare("compression") == 0) {
      // must be 0:No compression, 1: RLE, 2: ZIPs or 3: ZIP
      if (data[0] > 3) {
        if (err) {
          (*err) = "Unsupported compression type.";
        }
        return -5;
      }

      compressionType = data[0];
      if (compressionType == 3) { // ZIP
        numScanlineBlocks = 16;
      }

    } else if (attrName.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      ReadChannelInfo(channels, data);

      numChannels = channels.size();
      if (numChannels < 1) {
        if (err) {
          (*err) = "Invalid channels format.";
        }
        return -6;
      }

    } else if (attrName.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));

      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&dx));
        swap4(reinterpret_cast<unsigned int *>(&dy));
        swap4(reinterpret_cast<unsigned int *>(&dw));
        swap4(reinterpret_cast<unsigned int *>(&dh));
      }

    } else if (attrName.compare("displayWindow") == 0) {
      // NOTE(review): parsed but the values are never used.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));

      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&x));
        swap4(reinterpret_cast<unsigned int *>(&y));
        swap4(reinterpret_cast<unsigned int *>(&w));
        swap4(reinterpret_cast<unsigned int *>(&h));
      }
    }

    marker = marker_next;
  }

  // Required attributes must all have been seen by now.
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(numChannels >= 1);

  int dataWidth = dw - dx + 1;
  int dataHeight = dh - dy + 1;

  // NOTE(review): `image` is never used below; looks like leftover.
  std::vector<float> image(dataWidth * dataHeight * 4); // 4 = RGBA

  // Read offset tables.
  int numBlocks = dataHeight / numScanlineBlocks;
  if (numBlocks * numScanlineBlocks < dataHeight) {
    numBlocks++; // partial last block
  }

  std::vector<long long> offsets(numBlocks);

  for (int y = 0; y < numBlocks; y++) {
    long long offset;
    memcpy(&offset, marker, sizeof(long long));
    if (IsBigEndian()) {
      swap8(reinterpret_cast<unsigned long long *>(&offset));
    }
    marker += sizeof(long long); // = 8
    offsets[y] = offset;
  }

  // NOTE(review): compressionType == 0 (uncompressed) passes this filter,
  // but the block decoder below unconditionally calls DecompressZip — verify
  // behavior for uncompressed deep files.
  if (compressionType != 0 && compressionType != 2 && compressionType != 3) {
    if (err) {
      (*err) = "Unsupported format.";
    }
    return -10;
  }

  // image[channel][scanline] -> per-line sample arrays, allocated in the
  // decode loop below once the sample count per line is known.
  deepImage->image = (float ***)malloc(sizeof(float **) * numChannels);
  for (int c = 0; c < numChannels; c++) {
    deepImage->image[c] = (float **)malloc(sizeof(float *) * dataHeight);
    // NOTE(review): empty loop — appears to be leftover; row pointers are
    // filled in the per-block decode loop.
    for (int y = 0; y < dataHeight; y++) {
    }
  }

  deepImage->offset_table = (int **)malloc(sizeof(int *) * dataHeight);
  for (int y = 0; y < dataHeight; y++) {
    deepImage->offset_table[y] = (int *)malloc(sizeof(int) * dataWidth);
  }

  for (int y = 0; y < numBlocks; y++) {
    const unsigned char *dataPtr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);

    // Deep chunk layout:
    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int lineNo;
    long long packedOffsetTableSize;
    long long packedSampleDataSize;
    long long unpackedSampleDataSize;
    memcpy(&lineNo, dataPtr, sizeof(int));
    memcpy(&packedOffsetTableSize, dataPtr + 4, sizeof(long long));
    memcpy(&packedSampleDataSize, dataPtr + 12, sizeof(long long));
    memcpy(&unpackedSampleDataSize, dataPtr + 20, sizeof(long long));

    if (IsBigEndian()) {
      swap4(reinterpret_cast<unsigned int *>(&lineNo));
      swap8(reinterpret_cast<unsigned long long *>(&packedOffsetTableSize));
      swap8(reinterpret_cast<unsigned long long *>(&packedSampleDataSize));
      swap8(reinterpret_cast<unsigned long long *>(&unpackedSampleDataSize));
    }

    std::vector<int> pixelOffsetTable(dataWidth);

    // decode pixel offset table. Payload starts at +28 (4 + 3*8 header
    // bytes). Entries are cumulative sample counts per pixel.
    {
      unsigned long dstLen = pixelOffsetTable.size() * sizeof(int);
      DecompressZip(reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
                    dstLen, dataPtr + 28, packedOffsetTableSize);

      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (int i = 0; i < dataWidth; i++) {
        deepImage->offset_table[y][i] = pixelOffsetTable[i];
      }
    }

    std::vector<unsigned char> sampleData(unpackedSampleDataSize);

    // decode sample data.
    {
      unsigned long dstLen = unpackedSampleDataSize;
      DecompressZip(reinterpret_cast<unsigned char *>(&sampleData.at(0)),
                    dstLen, dataPtr + 28 + packedOffsetTableSize,
                    packedSampleDataSize);
      assert(dstLen == (unsigned long)unpackedSampleDataSize);
    }

    // decode sample: compute per-sample byte size and each channel's byte
    // offset within a sample.
    int sampleSize = -1;
    std::vector<int> channelOffsetList(numChannels);
    {
      int channelOffset = 0;
      for (int i = 0; i < numChannels; i++) {
        channelOffsetList[i] = channelOffset;
        if (channels[i].pixelType == TINYEXR_PIXELTYPE_UINT) { // UINT
          channelOffset += 4;
        } else if (channels[i].pixelType == TINYEXR_PIXELTYPE_HALF) { // half
          channelOffset += 2;
        } else if (channels[i].pixelType == TINYEXR_PIXELTYPE_FLOAT) { // float
          channelOffset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channelOffset;
    }
    assert(sampleSize >= 2);

    // Last cumulative offset must equal the total number of samples.
    assert((size_t)(pixelOffsetTable[dataWidth - 1] * sampleSize) ==
           sampleData.size());
    int samplesPerLine = sampleData.size() / sampleSize;

    //
    // Alloc memory
    //

    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      unsigned long long dataOffset = 0;
      for (int c = 0; c < numChannels; c++) {
        deepImage->image[c][y] =
            (float *)malloc(sizeof(float) * samplesPerLine);

        if (channels[c].pixelType == 0) { // UINT
          for (int x = 0; x < samplesPerLine; x++) {
            unsigned int ui = *reinterpret_cast<unsigned int *>(
                &sampleData.at(dataOffset + x * sizeof(int)));
            deepImage->image[c][y][x] = (float)ui; // @fixme
          }
          dataOffset += sizeof(unsigned int) * samplesPerLine;
        } else if (channels[c].pixelType == 1) { // half
          for (int x = 0; x < samplesPerLine; x++) {
            FP16 f16;
            f16.u = *reinterpret_cast<unsigned short *>(
                &sampleData.at(dataOffset + x * sizeof(short)));
            FP32 f32 = half_to_float(f16);
            deepImage->image[c][y][x] = f32.f;
          }
          dataOffset += sizeof(short) * samplesPerLine;
        } else { // float
          for (int x = 0; x < samplesPerLine; x++) {
            float f = *reinterpret_cast<float *>(
                &sampleData.at(dataOffset + x * sizeof(float)));
            deepImage->image[c][y][x] = f;
          }
          dataOffset += sizeof(float) * samplesPerLine;
        }
      }
    }
  } // y

  deepImage->width = dataWidth;
  deepImage->height = dataHeight;

  // Duplicate channel names (freed by the caller / FreeDeepEXRImage).
  deepImage->channel_names =
      (const char **)malloc(sizeof(const char *) * numChannels);
  for (int c = 0; c < numChannels; c++) {
#ifdef _WIN32
    deepImage->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deepImage->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deepImage->num_channels = numChannels;

  return 0; // OK
}
// Write `deepImage` to `filename` as a deep scanline EXR (version 2, deep
// flag set). NOTE: the chunk writer (offset table + sample data) is still
// unimplemented — see the #if 0 section below — so only the file header is
// currently emitted. Returns 0 on success, negative on error (*err set
// when non-NULL).
int SaveDeepEXR(const DeepImage *deepImage, const char *filename,
                const char **err) {
  if (deepImage == NULL || filename == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }

  // Bug fix: open the output for *writing* in binary mode. The previous
  // "rb" mode cannot create the file and rejects all writes.
  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot write file.";
    }
    return -1;
  }

  // Write header check (EXR magic number).
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    size_t n = fwrite(header, 1, 4, fp);
    if (n != 4) {
      if (err) {
        (*err) = "Header write failed.";
      }
      fclose(fp);
      return -3;
    }
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    const char data[] = {2, 8, 0, 0};
    size_t n = fwrite(data, 1, 4, fp);
    if (n != 4) {
      if (err) {
        (*err) = "Flag write failed.";
      }
      fclose(fp);
      return -3;
    }
  }

  // Write attributes.
  {
    int data = 2; // ZIPS
    // NOTE(review): this writes sizeof(int) == 4 bytes, while the reader
    // and SaveMultiChannelEXRToMemory treat "compression" as a 1-byte
    // attribute — confirm against the EXR spec before relying on output.
    WriteAttribute(fp, "compression", "compression",
                   reinterpret_cast<const unsigned char *>(&data), sizeof(int));
  }

  {
    // dataWindow == displayWindow == [0,0]-[w-1,h-1].
    int data[4] = {0, 0, deepImage->width - 1, deepImage->height - 1};
    WriteAttribute(fp, "dataWindow", "box2i",
                   reinterpret_cast<const unsigned char *>(data),
                   sizeof(int) * 4);
    WriteAttribute(fp, "displayWindow", "box2i",
                   reinterpret_cast<const unsigned char *>(data),
                   sizeof(int) * 4);
  }

  int numScanlineBlocks = 1;

  // Write offset tables.
  int numBlocks = deepImage->height / numScanlineBlocks;
  if (numBlocks * numScanlineBlocks < deepImage->height) {
    numBlocks++; // partial last block
  }

#if 0 // @todo
  std::vector<long long> offsets(numBlocks);

  //std::vector<int> pixelOffsetTable(dataWidth);

  // compress pixel offset table.
  {
    unsigned long dstLen = pixelOffsetTable.size() * sizeof(int);
    Compresses(reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
               dstLen, dataPtr + 28, packedOffsetTableSize);

    assert(dstLen == pixelOffsetTable.size() * sizeof(int));
    //
    for (int i = 0; i < dataWidth; i++) {
      deepImage->offset_table[y][i] = pixelOffsetTable[i];
    }
  }

  for (int y = 0; y < numBlocks; y++) {
    //long long offset = *(reinterpret_cast<const long long *>(marker));
    //marker += sizeof(long long); // = 8
    offsets[y] = offset;
  }

  // Write offset table.
  fwrite(&offsets.at(0), sizeof(long long), numBlocks, fp);

  for (int y = 0; y < numBlocks; y++) {
    const unsigned char *dataPtr =
      reinterpret_cast<const unsigned char *>(head + offsets[y]);

    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int lineNo = *reinterpret_cast<const int *>(dataPtr);
    long long packedOffsetTableSize =
      *reinterpret_cast<const long long *>(dataPtr + 4);
    long long packedSampleDataSize =
      *reinterpret_cast<const long long *>(dataPtr + 12);
    long long unpackedSampleDataSize =
      *reinterpret_cast<const long long *>(dataPtr + 20);

    int endLineNo = (std::min)(lineNo + numScanlineBlocks, dataHeight);

    int numLines = endLineNo - lineNo;

    std::vector<int> pixelOffsetTable(dataWidth);

    // decode pixel offset table.
    {
      unsigned long dstLen = pixelOffsetTable.size() * sizeof(int);
      DecompressZip(reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
                    dstLen, dataPtr + 28, packedOffsetTableSize);

      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (int i = 0; i < dataWidth; i++) {
        deepImage->offset_table[y][i] = pixelOffsetTable[i];
      }
    }

    std::vector<unsigned char> sampleData(unpackedSampleDataSize);

    // decode sample data.
    {
      unsigned long dstLen = unpackedSampleDataSize;
      DecompressZip(reinterpret_cast<unsigned char *>(&sampleData.at(0)),
                    dstLen, dataPtr + 28 + packedOffsetTableSize,
                    packedSampleDataSize);
      assert(dstLen == unpackedSampleDataSize);
    }

    // decode sample
    int sampleSize = -1;
    std::vector<int> channelOffsetList(numChannels);
    {
      int channelOffset = 0;
      for (int i = 0; i < numChannels; i++) {
        channelOffsetList[i] = channelOffset;
        if (channels[i].pixelType == 0) { // UINT
          channelOffset += 4;
        } else if (channels[i].pixelType == 1) { // half
          channelOffset += 2;
        } else if (channels[i].pixelType == 2) { // float
          channelOffset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channelOffset;
    }
    assert(sampleSize >= 2);

    assert(pixelOffsetTable[dataWidth - 1] * sampleSize == sampleData.size());

    int samplesPerLine = sampleData.size() / sampleSize;

    //
    // Alloc memory
    //

    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      unsigned long long dataOffset = 0;
      for (int c = 0; c < numChannels; c++) {
        deepImage->image[c][y] =
          (float *)malloc(sizeof(float) * samplesPerLine);

        if (channels[c].pixelType == 0) { // UINT
          for (int x = 0; x < samplesPerLine; x++) {
            unsigned int ui = *reinterpret_cast<unsigned int *>(
                &sampleData.at(dataOffset + x * sizeof(int)));
            deepImage->image[c][y][x] = (float)ui; // @fixme
          }
          dataOffset += sizeof(unsigned int) * samplesPerLine;
        } else if (channels[c].pixelType == 1) { // half
          for (int x = 0; x < samplesPerLine; x++) {
            FP16 f16;
            f16.u = *reinterpret_cast<unsigned short *>(
                &sampleData.at(dataOffset + x * sizeof(short)));
            FP32 f32 = half_to_float(f16);
            deepImage->image[c][y][x] = f32.f;
          }
          dataOffset += sizeof(short) * samplesPerLine;
        } else { // float
          for (int x = 0; x < samplesPerLine; x++) {
            float f = *reinterpret_cast<float *>(
                &sampleData.at(dataOffset + x * sizeof(float)));
            deepImage->image[c][y][x] = f;
          }
          dataOffset += sizeof(float) * samplesPerLine;
        }
      }
    }
  } // y
#endif

  fclose(fp);

  return 0; // OK
}
// Reset an EXRImage descriptor to a safe, empty state.
// All pointer members become NULL, counters become zero, and the
// compression mode defaults to ZIP. A NULL argument is ignored.
void InitEXRImage(EXRImage *exrImage) {
  if (!exrImage) {
    return; // nothing to initialize
  }
  exrImage->num_channels = 0;
  exrImage->num_custom_attributes = 0;
  exrImage->channel_names = NULL;
  exrImage->images = NULL;
  exrImage->pixel_types = NULL;
  exrImage->requested_pixel_types = NULL;
  exrImage->compression = TINYEXR_COMPRESSIONTYPE_ZIP;
}
// Release every heap allocation owned by an EXRImage and reset the
// structure so that a second FreeEXRImage() call (or accidental reuse)
// is harmless instead of a double-free.
// Returns 0 on success, -1 if exrImage is NULL.
int FreeEXRImage(EXRImage *exrImage) {
  if (exrImage == NULL) {
    return -1; // Err
  }
  // Per-channel allocations: name copies and per-channel pixel buffers.
  for (int i = 0; i < exrImage->num_channels; i++) {
    if (exrImage->channel_names && exrImage->channel_names[i]) {
      free((char *)exrImage->channel_names[i]); // remove const
    }
    if (exrImage->images && exrImage->images[i]) {
      free(exrImage->images[i]);
    }
  }
  // free(NULL) is a no-op, so no guards are needed here. Each pointer is
  // nulled out immediately so the struct cannot be double-freed.
  free(exrImage->channel_names);
  exrImage->channel_names = NULL;
  free(exrImage->images);
  exrImage->images = NULL;
  free(exrImage->pixel_types);
  exrImage->pixel_types = NULL;
  free(exrImage->requested_pixel_types);
  exrImage->requested_pixel_types = NULL;
  // Custom attributes: name/type/value are heap copies made by the parser.
  for (int i = 0; i < exrImage->num_custom_attributes; i++) {
    free(exrImage->custom_attributes[i].name);
    exrImage->custom_attributes[i].name = NULL;
    free(exrImage->custom_attributes[i].type);
    exrImage->custom_attributes[i].type = NULL;
    free(exrImage->custom_attributes[i].value);
    exrImage->custom_attributes[i].value = NULL;
  }
  // Reset counters so the loops above are empty on a repeated call.
  exrImage->num_channels = 0;
  exrImage->num_custom_attributes = 0;
  return 0;
}
// Parse a multi-channel EXR header from a file on disk.
// Reads the whole file into memory and delegates to
// ParseMultiChannelEXRHeaderFromMemory(). Returns 0 on success, a
// negative value on failure (err, if non-NULL, receives a static
// message).
int ParseMultiChannelEXRHeaderFromFile(EXRImage *exrImage, const char *filename,
                                       const char **err) {
  if (exrImage == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }
  // Compute file size. ftell() can return -1 on error, and a zero-size
  // file cannot contain an EXR header; both cases previously led to
  // undefined behavior when indexing an empty vector.
  fseek(fp, 0, SEEK_END);
  long pos = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (pos < 1) {
    fclose(fp);
    if (err) {
      (*err) = "Cannot read file.";
    }
    return -1;
  }
  size_t filesize = (size_t)pos;
  std::vector<unsigned char> buf(filesize); // @todo { use mmap }
  {
    // A short read was previously only assert()ed, which is compiled out
    // in release builds; check it explicitly instead.
    size_t ret = fread(&buf.at(0), 1, filesize, fp);
    fclose(fp);
    if (ret != filesize) {
      if (err) {
        (*err) = "Cannot read file.";
      }
      return -1;
    }
  }
  return ParseMultiChannelEXRHeaderFromMemory(exrImage, &buf.at(0), err);
}
// Parse a multi-channel EXR header from an in-memory buffer and fill
// exrImage with channel names, pixel types, window geometry and custom
// attributes. Returns 0 on success, a negative error code on failure
// (err, if non-NULL, receives a static message).
//
// BUGFIX: numCustomAttributes was never incremented when an attribute
// was collected, so the `numCustomAttributes > 0` block at the end was
// unreachable — every custom attribute was silently dropped and its
// strdup()/malloc() allocations leaked. It is now incremented alongside
// each push_back.
int ParseMultiChannelEXRHeaderFromMemory(EXRImage *exrImage,
                                         const unsigned char *memory,
                                         const char **err) {
  if (exrImage == NULL || memory == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return -1;
  }
  const char *buf = reinterpret_cast<const char *>(memory);
  const char *marker = &buf[0];
  // Header check: EXR magic number 0x76 0x2f 0x31 0x01.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      if (err) {
        (*err) = "Header mismatch.";
      }
      return -3;
    }
    marker += 4;
  }
  // Version, scanline.
  {
    // must be [2, 0, 0, 0]
    if (marker[0] != 2 || marker[1] != 0 || marker[2] != 0 || marker[3] != 0) {
      if (err) {
        (*err) = "Unsupported version or scanline.";
      }
      return -4;
    }
    marker += 4;
  }
  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int numChannels = -1;
  int displayWindow[4] = {-1, -1, -1, -1};    // @fixme.
  float screenWindowCenter[2] = {0.0f, 0.0f}; // @fixme
  float screenWindowWidth = 1.0f;             // @fixme
  float pixelAspectRatio = 1.0f;
  unsigned char lineOrder = 0; // 0 -> increasing y; 1 -> decreasing
  std::vector<ChannelInfo> channels;
  int compressionType = 0; // @fixme
  int numCustomAttributes = 0;
  std::vector<EXRAttribute> customAttribs;
  // Read attributes until the NUL terminator of the attribute list.
  for (;;) {
    std::string attrName;
    std::string attrType;
    std::vector<unsigned char> data;
    const char *marker_next = ReadAttribute(attrName, attrType, data, marker);
    if (marker_next == NULL) {
      marker++; // skip '\0'
      break;
    }
    if (attrName.compare("compression") == 0) {
      // must be 0:No compression, 1: RLE, 2: ZIPs, 3: ZIP or 4: PIZ
      if (data[0] > TINYEXR_COMPRESSIONTYPE_PIZ) {
        if (err) {
          (*err) = "Unsupported compression type.";
        }
        return -5;
      }
      compressionType = data[0];
    } else if (attrName.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      ReadChannelInfo(channels, data);
      numChannels = channels.size();
      if (numChannels < 1) {
        if (err) {
          (*err) = "Invalid channels format.";
        }
        return -6;
      }
    } else if (attrName.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&dx));
        swap4(reinterpret_cast<unsigned int *>(&dy));
        swap4(reinterpret_cast<unsigned int *>(&dw));
        swap4(reinterpret_cast<unsigned int *>(&dh));
      }
    } else if (attrName.compare("displayWindow") == 0) {
      memcpy(&displayWindow[0], &data.at(0), sizeof(int));
      memcpy(&displayWindow[1], &data.at(4), sizeof(int));
      memcpy(&displayWindow[2], &data.at(8), sizeof(int));
      memcpy(&displayWindow[3], &data.at(12), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[0]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[1]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[2]));
        swap4(reinterpret_cast<unsigned int *>(&displayWindow[3]));
      }
    } else if (attrName.compare("lineOrder") == 0) {
      int order;
      memcpy(&order, &data.at(0), sizeof(int));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&order));
      }
      lineOrder = (unsigned char)order;
    } else if (attrName.compare("pixelAspectRatio") == 0) {
      memcpy(&pixelAspectRatio, &data.at(0), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&pixelAspectRatio));
      }
    } else if (attrName.compare("screenWindowCenter") == 0) {
      memcpy(&screenWindowCenter[0], &data.at(0), sizeof(float));
      memcpy(&screenWindowCenter[1], &data.at(4), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[0]));
        swap4(reinterpret_cast<unsigned int *>(&screenWindowCenter[1]));
      }
    } else if (attrName.compare("screenWindowWidth") == 0) {
      memcpy(&screenWindowWidth, &data.at(0), sizeof(float));
      if (IsBigEndian()) {
        swap4(reinterpret_cast<unsigned int *>(&screenWindowWidth));
      }
    } else {
      // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES)
      if (numCustomAttributes < TINYEXR_MAX_ATTRIBUTES) {
        EXRAttribute attrib;
        attrib.name = strdup(attrName.c_str());
        attrib.type = strdup(attrType.c_str());
        attrib.size = data.size();
        attrib.value = (unsigned char *)malloc(data.size());
        // Guard: data.at(0) throws on a zero-length attribute payload.
        if (!data.empty()) {
          memcpy((char *)attrib.value, &data.at(0), data.size());
        }
        customAttribs.push_back(attrib);
        numCustomAttributes++; // BUGFIX: was missing, see header comment.
      }
    }
    marker = marker_next;
  }
  // Required attributes must have been seen. These were previously only
  // assert()ed, which is a no-op in release builds and would let garbage
  // dimensions propagate to the caller.
  if (dx < 0 || dy < 0 || dw < 0 || dh < 0 || numChannels < 1) {
    if (err) {
      (*err) = "Required attribute(s) missing or invalid.";
    }
    return -7;
  }
  int dataWidth = dw - dx + 1;
  int dataHeight = dh - dy + 1;
  {
    exrImage->channel_names =
        (const char **)malloc(sizeof(const char *) * numChannels);
    for (int c = 0; c < numChannels; c++) {
#ifdef _WIN32
      exrImage->channel_names[c] = _strdup(channels[c].name.c_str());
#else
      exrImage->channel_names[c] = strdup(channels[c].name.c_str());
#endif
    }
    exrImage->num_channels = numChannels;
    exrImage->width = dataWidth;
    exrImage->height = dataHeight;
    exrImage->pixel_aspect_ratio = pixelAspectRatio;
    exrImage->screen_window_center[0] = screenWindowCenter[0];
    exrImage->screen_window_center[1] = screenWindowCenter[1];
    exrImage->screen_window_width = screenWindowWidth;
    exrImage->display_window[0] = displayWindow[0];
    exrImage->display_window[1] = displayWindow[1];
    exrImage->display_window[2] = displayWindow[2];
    exrImage->display_window[3] = displayWindow[3];
    exrImage->data_window[0] = dx;
    exrImage->data_window[1] = dy;
    exrImage->data_window[2] = dw;
    exrImage->data_window[3] = dh;
    exrImage->line_order = lineOrder;
    exrImage->compression = compressionType;
    exrImage->pixel_types = (int *)malloc(sizeof(int) * numChannels);
    for (int c = 0; c < numChannels; c++) {
      exrImage->pixel_types[c] = channels[c].pixelType;
    }
    // Initially fill with values of `pixel-types`
    exrImage->requested_pixel_types = (int *)malloc(sizeof(int) * numChannels);
    for (int c = 0; c < numChannels; c++) {
      exrImage->requested_pixel_types[c] = channels[c].pixelType;
    }
  }
  if (numCustomAttributes > 0) {
    // <= because the guard above allows customAttribs to reach exactly
    // TINYEXR_MAX_ATTRIBUTES entries.
    assert(customAttribs.size() <= TINYEXR_MAX_ATTRIBUTES);
    exrImage->num_custom_attributes = numCustomAttributes;
    for (int i = 0; i < (int)customAttribs.size(); i++) {
      exrImage->custom_attributes[i] = customAttribs[i];
    }
  }
  return 0; // OK
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif
#endif // __TINYEXR_H__
|
task_hello.c | //===-- task_hello.c - Example for the "task" construct -----------*- C -*-===//
//
// Part of the LOMP Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <unistd.h>
#include <omp.h>
#define NTASKS 16
#define MANY_TASKS 1
// Spawn one outer OpenMP task that computes i * d and then (when
// MANY_TASKS is non-zero) spawns two nested child tasks, each printing
// the result together with the thread that ran it.
// NOTE(review): caller (main) passes i = 1..NTASKS, so `answer / i`
// never divides by zero here; i == 0 would — confirm if reused.
void create_task(int i, double d) {
// Outer task captures i and d by value at creation time.
#pragma omp task firstprivate(i) firstprivate(d)
{
double answer = i * d;
// With MANY_TASKS the block below becomes nested task 1; without it,
// the block executes inline inside the outer task.
#if MANY_TASKS
#pragma omp task firstprivate(answer) firstprivate(i)
#endif
{
printf("Hello from task %d/1 on thread %d, and the answer is %lf (%lf x "
"%d)\n",
i, omp_get_thread_num(), answer, answer / i, i);
}
// Nested task 2 exists only in the MANY_TASKS configuration.
#if MANY_TASKS
#pragma omp task firstprivate(answer) firstprivate(i)
{
printf("Hello from task %d/2 on thread %d, and the answer is %lf (%lf x "
"%d)\n",
i, omp_get_thread_num(), answer, answer / i, i);
}
#endif
}
}
// Driver: a single (master) thread creates NTASKS task trees inside a
// parallel region; the other threads in the team execute the tasks.
int main(void) {
double d = 42.0;
#pragma omp parallel
{
// Only the master thread generates tasks; i + 1 keeps the task ids
// starting at 1 (avoids division by zero in create_task).
#pragma omp master
for (int i = 0; i < NTASKS; ++i) {
create_task(i + 1, d);
}
}
return 0;
}
|
expected_output.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
//---------------------------------------------------------------------
// program BT
//---------------------------------------------------------------------
//----------
// Class S:
//----------
//----------
// Class W:
//----------
//----------
// Class A:
//----------
//----------
// Class B:
//----------
//----------
// Class C:
//----------
//----------
// Class D:
//----------
//----------
// Class E:
//----------
// Double-precision complex number (real/imaginary parts). Declared for
// the NPB common infrastructure; not referenced in the visible portion
// of this translation unit.
struct anon_NAS_BT_c_87 {
double real;
double imag;
};
typedef struct anon_NAS_BT_c_87 dcomplex;
/*common /global/*/
int grid_points[3];
/*common /constants/*/
double tx1;
double tx2;
double tx3;
double ty1;
double ty2;
double ty3;
double tz1;
double tz2;
double tz3;
double dx1;
double dx2;
double dx3;
double dx4;
double dx5;
double dy1;
double dy2;
double dy3;
double dy4;
double dy5;
double dz1;
double dz2;
double dz3;
double dz4;
double dz5;
double dssp;
double dt;
double ce[5][13];
double dxmax;
double dymax;
double dzmax;
double xxcon1;
double xxcon2;
double xxcon3;
double xxcon4;
double xxcon5;
double dx1tx1;
double dx2tx1;
double dx3tx1;
double dx4tx1;
double dx5tx1;
double yycon1;
double yycon2;
double yycon3;
double yycon4;
double yycon5;
double dy1ty1;
double dy2ty1;
double dy3ty1;
double dy4ty1;
double dy5ty1;
double zzcon1;
double zzcon2;
double zzcon3;
double zzcon4;
double zzcon5;
double dz1tz1;
double dz2tz1;
double dz3tz1;
double dz4tz1;
double dz5tz1;
double dnxm1;
double dnym1;
double dnzm1;
double c1c2;
double c1c5;
double c3c4;
double c1345;
double conz1;
double c1;
double c2;
double c3;
double c4;
double c5;
double c4dssp;
double c5dssp;
double dtdssp;
double dttx1;
double dttx2;
double dtty1;
double dtty2;
double dttz1;
double dttz2;
double c2dttx1;
double c2dtty1;
double c2dttz1;
double comz1;
double comz4;
double comz5;
double comz6;
double c3c4tx3;
double c3c4ty3;
double c3c4tz3;
double c2iv;
double con43;
double con16;
// to improve cache performance, grid dimensions padded by 1
// for even number sizes only.
/*common /fields/*/
double us[24][25][25];
double vs[24][25][25];
double ws[24][25][25];
double qs[24][25][25];
double rho_i[24][25][25];
double square[24][25][25];
double forcing[24][25][25][5];
double u[24][25][25][5];
double rhs[24][25][25][5];
//-----------------------------------------------------------------------
// Timer constants
//-----------------------------------------------------------------------
void initialize();
void lhsinit(double lhs[][3][5][5], int size);
void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
void exact_rhs();
void set_constants();
void adi();
void compute_rhs();
void x_solve();
void y_solve();
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]);
void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
void binvrhs(double lhs[5][5], double r[5]);
void z_solve();
void add();
void error_norm(double rms[5]);
void rhs_norm(double rms[5]);
void verify(int no_time_steps, char *class, int *verified);
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified);
double start[64];
double elapsed[64];
double elapsed_time();
void timer_clear(int n);
void timer_start(int n);
void timer_stop(int n);
double timer_read(int n);
void wtime(double *t);
// NPB BT benchmark driver (class fixed at compile time, grid 24^3,
// 200 iterations): initialize, run one warm-up ADI step, re-initialize,
// time `niter` ADI steps, verify the result and print MFLOP/s.
int main(int argc, char *argv[]) {
int i, niter, step;
double navg, mflops, n3;
double tmax;
double t;
double trecs[12];
int verified;
char Class;
char *t_names[12];
// NOTE(review): t, trecs and t_names are declared but never used in
// this function body (leftovers from the timed/profiled build).
printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - BT Benchmark\n\n");
niter = 200;
dt = 0.0008;
grid_points[0] = 24;
grid_points[1] = 24;
grid_points[2] = 24;
printf(" Size: %4dx%4dx%4d\n", grid_points[0], grid_points[1], grid_points[2]);
printf(" Iterations: %4d dt: %10.6f\n", niter, dt);
printf("\n");
// Static arrays are compiled for a 24^3 grid; refuse anything larger.
if((grid_points[0] > 24) || (grid_points[1] > 24) || (grid_points[2] > 24)) {
printf(" %d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
printf(" Problem size too big for compiled array sizes\n");
return 0;
}
set_constants();
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(i = 1; i <= 11; i++) {
timer_clear(i);
}
initialize();
exact_rhs();
//---------------------------------------------------------------------
// do one time step to touch all code, and reinitialize
//---------------------------------------------------------------------
adi();
initialize();
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(i = 1; i <= 11; i++) {
timer_clear(i);
}
timer_start(1);
/*************** Clava msgError **************
Variables Access as passed arguments Can not be traced inside of function calls :
printf#277{printf(" Time step %4d\n", step)}
compute_rhs#280{compute_rhs()}
add#2666{add()}
****************************************/
// Timed main iteration loop; progress is reported every 20 steps.
for(step = 1; step <= niter; step++) {
if((step % 20) == 0 || step == 1) {
printf(" Time step %4d\n", step);
}
adi();
}
timer_stop(1);
tmax = timer_read(1);
// verify() sets Class and verified from the computed solution norms.
verify(niter, &Class, &verified);
n3 = 1.0 * grid_points[0] * grid_points[1] * grid_points[2];
navg = (grid_points[0] + grid_points[1] + grid_points[2]) / 3.0;
// Standard NPB BT operation-count model divided by elapsed time.
if(tmax != 0.0) {
mflops = 1.0e-6 * (double) niter * (3478.8 * n3 - 17655.7 * (navg * navg) + 28023.7 * navg) / tmax;
}
else {
mflops = 0.0;
}
print_results("BT", Class, grid_points[0], grid_points[1], grid_points[2], niter, tmax, mflops, " floating point", verified);
// Exit 0 only when verification succeeded.
int exitValue = verified ? 0 : 1;
return exitValue;
}
// One ADI (Alternating Direction Implicit) time step: build the right
// hand side, solve implicitly along x, y and z, then apply the update.
// The call order is the algorithm; do not reorder.
void adi() {
compute_rhs();
x_solve();
y_solve();
z_solve();
add();
}
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
// Add the computed update `rhs` to the solution vector `u` at every
// interior grid point (indices 1 .. grid_points[d]-2 in each dimension).
// NOTE(review): the pragmas are Clava-generated; nesting `parallel for`
// at all three loop levels and firstprivate-copying the large global
// arrays looks costly — confirm against the generator's intent.
void add() {
int i, j, k, m;
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points, rhs)
for(k = 1; k <= grid_points[2] - 2; k++) {
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points, rhs)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points, rhs)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];
}
}
}
}
}
//---------------------------------------------------------------------
// this function computes the norm of the difference between the
// computed solution and the exact solution
//---------------------------------------------------------------------
// Compute, per solution component m, the RMS difference between the
// computed solution u and the analytic exact solution over the whole
// grid; result returned in rms[5].
void error_norm(double rms[5]) {
int i, j, k, m, d;
double xi;
double eta;
double zeta;
double u_exact[5];
double add;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rms[m] = 0.0;
}
// Accumulate squared errors; rms[:5] is combined across threads via
// the OpenMP array-section reduction.
#pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, add) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi, add) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
#pragma omp parallel for default(shared) private(i, m, xi, add) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, u_exact);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
add = u[k][j][i][m] - u_exact[m];
rms[m] = rms[m] + add * add;
}
}
}
}
// Normalize by the number of interior points in each dimension, then
// take the square root to get the RMS value.
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(d = 0; d < 3; d++) {
rms[m] = rms[m] / (double) (grid_points[d] - 2);
}
rms[m] = sqrt(rms[m]);
}
}
// Compute, per component m, the RMS norm of the right-hand-side array
// over all interior grid points; result returned in rms[5]. Same
// normalization scheme as error_norm().
void rhs_norm(double rms[5]) {
int i, j, k, d, m;
double add;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rms[m] = 0.0;
}
// Accumulate rhs^2 over interior points with an array-section reduction.
#pragma omp parallel for default(shared) private(k, j, i, m, add) firstprivate(grid_points, rhs) reduction(+ : rms[:5])
for(k = 1; k <= grid_points[2] - 2; k++) {
#pragma omp parallel for default(shared) private(j, i, m, add) firstprivate(k, grid_points, rhs) reduction(+ : rms[:5])
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m, add) firstprivate(k, j, grid_points, rhs) reduction(+ : rms[:5])
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
add = rhs[k][j][i][m];
rms[m] = rms[m] + add * add;
}
}
}
}
// Normalize by interior point counts, then take the square root.
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(d = 0; d < 3; d++) {
rms[m] = rms[m] / (double) (grid_points[d] - 2);
}
rms[m] = sqrt(rms[m]);
}
}
//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//---------------------------------------------------------------------
void exact_rhs() {
double dtemp[5];
double xi;
double eta;
double zeta;
double dtpp;
int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
double cuf[25];
double q[25];
double ue[25][5];
double buf[25][5];
//---------------------------------------------------------------------
// initialize
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for(k = 0; k <= grid_points[2] - 1; k++) {
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points)
for(j = 0; j <= grid_points[1] - 1; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points)
for(i = 0; i <= grid_points[0] - 1; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = 0.0;
}
}
}
}
//---------------------------------------------------------------------
// xi-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, dtpp, im1, ip1) firstprivate(dnzm1, dnym1, dnxm1, tx2, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(k = 1; k <= grid_points[2] - 2; k++) {
zeta = (double) (k) * dnzm1;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi, dtpp, im1, ip1) firstprivate(dnym1, dnxm1, zeta, tx2, k, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(j = 1; j <= grid_points[1] - 2; j++) {
eta = (double) (j) * dnym1;
#pragma omp parallel for default(shared) private(i, m, xi, dtpp) firstprivate(dnxm1, zeta, eta, grid_points, ce, dtemp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, dtemp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
ue[i][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 1; m < 5; m++) {
buf[i][m] = dtpp * dtemp[m];
}
cuf[i] = buf[i][1] * buf[i][1];
buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
q[i] = 0.5 * (buf[i][1] * ue[i][1] + buf[i][2] * ue[i][2] + buf[i][3] * ue[i][3]);
}
#pragma omp parallel for default(shared) private(i, im1, ip1) firstprivate(tx2, k, j, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, grid_points, ue, q, buf, cuf)
for(i = 1; i <= grid_points[0] - 2; i++) {
im1 = i - 1;
ip1 = i + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] - tx2 * (ue[ip1][1] - ue[im1][1]) + dx1tx1 * (ue[ip1][0] - 2.0 * ue[i][0] + ue[im1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - tx2 * ((ue[ip1][1] * buf[ip1][1] + c2 * (ue[ip1][4] - q[ip1])) - (ue[im1][1] * buf[im1][1] + c2 * (ue[im1][4] - q[im1]))) + xxcon1 * (buf[ip1][1] - 2.0 * buf[i][1] + buf[im1][1]) + dx2tx1 * (ue[ip1][1] - 2.0 * ue[i][1] + ue[im1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - tx2 * (ue[ip1][2] * buf[ip1][1] - ue[im1][2] * buf[im1][1]) + xxcon2 * (buf[ip1][2] - 2.0 * buf[i][2] + buf[im1][2]) + dx3tx1 * (ue[ip1][2] - 2.0 * ue[i][2] + ue[im1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - tx2 * (ue[ip1][3] * buf[ip1][1] - ue[im1][3] * buf[im1][1]) + xxcon2 * (buf[ip1][3] - 2.0 * buf[i][3] + buf[im1][3]) + dx4tx1 * (ue[ip1][3] - 2.0 * ue[i][3] + ue[im1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - tx2 * (buf[ip1][1] * (c1 * ue[ip1][4] - c2 * q[ip1]) - buf[im1][1] * (c1 * ue[im1][4] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[ip1][0] - 2.0 * buf[i][0] + buf[im1][0]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * (buf[ip1][4] - 2.0 * buf[i][4] + buf[im1][4]) + dx5tx1 * (ue[ip1][4] - 2.0 * ue[i][4] + ue[im1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
i = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
i = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
#pragma omp parallel for default(shared) private(i, m) firstprivate(dssp, k, j, grid_points, ue)
for(i = 3; i <= grid_points[0] - 4; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
i = grid_points[0] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 6.0 * ue[i][m] - 4.0 * ue[i + 1][m]);
i = grid_points[0] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[i - 2][m] - 4.0 * ue[i - 1][m] + 5.0 * ue[i][m]);
}
}
}
//---------------------------------------------------------------------
// eta-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, i, j, m, zeta, xi, eta, dtpp, jm1, jp1) firstprivate(dnzm1, dnxm1, dnym1, ty2, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(k = 1; k <= grid_points[2] - 2; k++) {
zeta = (double) (k) * dnzm1;
#pragma omp parallel for default(shared) private(i, j, m, xi, eta, dtpp, jm1, jp1) firstprivate(dnxm1, dnym1, zeta, ty2, k, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(i = 1; i <= grid_points[0] - 2; i++) {
xi = (double) (i) * dnxm1;
#pragma omp parallel for default(shared) private(j, m, eta, dtpp) firstprivate(dnym1, zeta, xi, grid_points, ce, dtemp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
exact_solution(xi, eta, zeta, dtemp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
ue[j][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 1; m < 5; m++) {
buf[j][m] = dtpp * dtemp[m];
}
cuf[j] = buf[j][2] * buf[j][2];
buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
q[j] = 0.5 * (buf[j][1] * ue[j][1] + buf[j][2] * ue[j][2] + buf[j][3] * ue[j][3]);
}
#pragma omp parallel for default(shared) private(j, jm1, jp1) firstprivate(ty2, k, i, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, grid_points, ue, buf, q, cuf)
for(j = 1; j <= grid_points[1] - 2; j++) {
jm1 = j - 1;
jp1 = j + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] - ty2 * (ue[jp1][2] - ue[jm1][2]) + dy1ty1 * (ue[jp1][0] - 2.0 * ue[j][0] + ue[jm1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - ty2 * (ue[jp1][1] * buf[jp1][2] - ue[jm1][1] * buf[jm1][2]) + yycon2 * (buf[jp1][1] - 2.0 * buf[j][1] + buf[jm1][1]) + dy2ty1 * (ue[jp1][1] - 2.0 * ue[j][1] + ue[jm1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - ty2 * ((ue[jp1][2] * buf[jp1][2] + c2 * (ue[jp1][4] - q[jp1])) - (ue[jm1][2] * buf[jm1][2] + c2 * (ue[jm1][4] - q[jm1]))) + yycon1 * (buf[jp1][2] - 2.0 * buf[j][2] + buf[jm1][2]) + dy3ty1 * (ue[jp1][2] - 2.0 * ue[j][2] + ue[jm1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - ty2 * (ue[jp1][3] * buf[jp1][2] - ue[jm1][3] * buf[jm1][2]) + yycon2 * (buf[jp1][3] - 2.0 * buf[j][3] + buf[jm1][3]) + dy4ty1 * (ue[jp1][3] - 2.0 * ue[j][3] + ue[jm1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - ty2 * (buf[jp1][2] * (c1 * ue[jp1][4] - c2 * q[jp1]) - buf[jm1][2] * (c1 * ue[jm1][4] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[jp1][0] - 2.0 * buf[j][0] + buf[jm1][0]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[jp1][4] - 2.0 * buf[j][4] + buf[jm1][4]) + dy5ty1 * (ue[jp1][4] - 2.0 * ue[j][4] + ue[jm1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
j = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
j = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
#pragma omp parallel for default(shared) private(j, m) firstprivate(dssp, k, i, grid_points, ue)
for(j = 3; j <= grid_points[1] - 4; j++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
j = grid_points[1] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 6.0 * ue[j][m] - 4.0 * ue[j + 1][m]);
j = grid_points[1] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[j - 2][m] - 4.0 * ue[j - 1][m] + 5.0 * ue[j][m]);
}
}
}
//---------------------------------------------------------------------
// zeta-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j, i, k, m, eta, xi, zeta, dtpp, km1, kp1) firstprivate(dnym1, dnxm1, dnzm1, tz2, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(j = 1; j <= grid_points[1] - 2; j++) {
eta = (double) (j) * dnym1;
#pragma omp parallel for default(shared) private(i, k, m, xi, zeta, dtpp, km1, kp1) firstprivate(dnxm1, dnzm1, eta, tz2, j, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for(i = 1; i <= grid_points[0] - 2; i++) {
xi = (double) (i) * dnxm1;
#pragma omp parallel for default(shared) private(k, m, zeta, dtpp) firstprivate(dnzm1, eta, xi, grid_points, ce, dtemp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
exact_solution(xi, eta, zeta, dtemp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
ue[k][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 1; m < 5; m++) {
buf[k][m] = dtpp * dtemp[m];
}
cuf[k] = buf[k][3] * buf[k][3];
buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
q[k] = 0.5 * (buf[k][1] * ue[k][1] + buf[k][2] * ue[k][2] + buf[k][3] * ue[k][3]);
}
#pragma omp parallel for default(shared) private(k, km1, kp1) firstprivate(tz2, j, i, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, grid_points, ue, buf, q, cuf)
for(k = 1; k <= grid_points[2] - 2; k++) {
km1 = k - 1;
kp1 = k + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] - tz2 * (ue[kp1][3] - ue[km1][3]) + dz1tz1 * (ue[kp1][0] - 2.0 * ue[k][0] + ue[km1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - tz2 * (ue[kp1][1] * buf[kp1][3] - ue[km1][1] * buf[km1][3]) + zzcon2 * (buf[kp1][1] - 2.0 * buf[k][1] + buf[km1][1]) + dz2tz1 * (ue[kp1][1] - 2.0 * ue[k][1] + ue[km1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - tz2 * (ue[kp1][2] * buf[kp1][3] - ue[km1][2] * buf[km1][3]) + zzcon2 * (buf[kp1][2] - 2.0 * buf[k][2] + buf[km1][2]) + dz3tz1 * (ue[kp1][2] - 2.0 * ue[k][2] + ue[km1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - tz2 * ((ue[kp1][3] * buf[kp1][3] + c2 * (ue[kp1][4] - q[kp1])) - (ue[km1][3] * buf[km1][3] + c2 * (ue[km1][4] - q[km1]))) + zzcon1 * (buf[kp1][3] - 2.0 * buf[k][3] + buf[km1][3]) + dz4tz1 * (ue[kp1][3] - 2.0 * ue[k][3] + ue[km1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - tz2 * (buf[kp1][3] * (c1 * ue[kp1][4] - c2 * q[kp1]) - buf[km1][3] * (c1 * ue[km1][4] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[kp1][0] - 2.0 * buf[k][0] + buf[km1][0]) + zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[kp1][4] - 2.0 * buf[k][4] + buf[km1][4]) + dz5tz1 * (ue[kp1][4] - 2.0 * ue[k][4] + ue[km1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
k = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (5.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
k = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (-4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
#pragma omp parallel for default(shared) private(k, m) firstprivate(dssp, j, i, grid_points, ue)
for(k = 3; k <= grid_points[2] - 4; k++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
k = grid_points[2] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 6.0 * ue[k][m] - 4.0 * ue[k + 1][m]);
k = grid_points[2] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp * (ue[k - 2][m] - 4.0 * ue[k - 1][m] + 5.0 * ue[k][m]);
}
}
}
//---------------------------------------------------------------------
// now change the sign of the forcing function,
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for(k = 1; k <= grid_points[2] - 2; k++) {
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
forcing[k][j][i][m] = -1.0 * forcing[k][j][i][m];
}
}
}
}
}
//---------------------------------------------------------------------
// this function returns the exact solution at point xi, eta, zeta
//---------------------------------------------------------------------
void exact_solution(double xi, double eta, double zeta, double dtemp[5]) {
//---------------------------------------------------------------------
// Evaluate the analytic solution at the point (xi, eta, zeta).
// For each of the five flow components the value is a constant term
// plus one quartic polynomial (evaluated in Horner form) in each of
// the three coordinates, with coefficients taken from the global
// table ce[5][13]. The result is written into dtemp[0..4].
//---------------------------------------------------------------------
int m;
for(m = 0; m < 5; m++) {
// Horner evaluation of the per-coordinate polynomials.
double poly_xi = ce[m][1] + xi * (ce[m][4] + xi * (ce[m][7] + xi * ce[m][10]));
double poly_eta = ce[m][2] + eta * (ce[m][5] + eta * (ce[m][8] + eta * ce[m][11]));
double poly_zeta = ce[m][3] + zeta * (ce[m][6] + zeta * (ce[m][9] + zeta * ce[m][12]));
dtemp[m] = ce[m][0] + xi * poly_xi + eta * poly_eta + zeta * poly_zeta;
}
}
//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values
//---------------------------------------------------------------------
void initialize() {
int i, j, k, m, ix, iy, iz;
double xi;
double eta;
double zeta;
// Pface[f][d][m]: exact-solution component m sampled on face f (0 or 1)
// of the unit cube perpendicular to direction d (0 = xi, 1 = eta, 2 = zeta).
double Pface[2][3][5];
double Pxi;
double Peta;
double Pzeta;
// Scratch buffer: the 5 exact-solution components at one boundary point.
double temp[5];
//---------------------------------------------------------------------
// Later (in compute_rhs) we compute 1/u for every element. A few of
// the corner elements are not used, but it convenient (and faster)
// to compute the whole thing with a simple loop. Make sure those
// values are nonzero by initializing the whole thing here.
//---------------------------------------------------------------------
// NOTE(review): the nested `parallel for` pragmas throughout this
// function open nested parallel regions; with OpenMP nesting disabled
// (the common default) only the outermost loop is parallelized.
// Confirm the nesting is intentional for the target runtime.
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for(k = 0; k <= grid_points[2] - 1; k++) {
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points)
for(j = 0; j <= grid_points[1] - 1; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points)
for(i = 0; i <= grid_points[0] - 1; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = 1.0;
}
}
}
}
//---------------------------------------------------------------------
// first store the "interpolated" values everywhere on the grid
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, ix, iy, iz, m, zeta, eta, xi, Pxi, Peta, Pzeta) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, Pface)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
#pragma omp parallel for default(shared) private(j, i, ix, iy, iz, m, eta, xi, Pxi, Peta, Pzeta) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, Pface)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
#pragma omp parallel for default(shared) private(i, ix, iy, iz, m, xi, Pxi, Peta, Pzeta) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, Pface)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
// Sample the exact solution on the two faces xi = 0 and xi = 1.
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(ix = 0; ix < 2; ix++) {
exact_solution((double) ix, eta, zeta, Pface[ix][0]);
}
// ... and on the two faces eta = 0 and eta = 1.
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(iy = 0; iy < 2; iy++) {
exact_solution(xi, (double) iy, zeta, Pface[iy][1]);
}
// ... and on the two faces zeta = 0 and zeta = 1.
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(iz = 0; iz < 2; iz++) {
exact_solution(xi, eta, (double) iz, Pface[iz][2]);
}
// Tri-linear transfinite interpolation of the six face values:
// blend each opposing face pair linearly, then combine the three
// blends via inclusion-exclusion so face data is not double-counted.
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m];
u[k][j][i][m] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta;
}
}
}
}
//---------------------------------------------------------------------
// now store the exact values on the boundaries
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// west face
//---------------------------------------------------------------------
i = 0;
xi = 0.0;
#pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
#pragma omp parallel for default(shared) private(j, m, eta) firstprivate(dnym1, zeta, xi, k, i, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// east face
//---------------------------------------------------------------------
i = grid_points[0] - 1;
xi = 1.0;
#pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
#pragma omp parallel for default(shared) private(j, m, eta) firstprivate(dnym1, zeta, xi, k, i, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// south face
//---------------------------------------------------------------------
j = 0;
eta = 0.0;
#pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
#pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// north face
//---------------------------------------------------------------------
j = grid_points[1] - 1;
eta = 1.0;
#pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
for(k = 0; k <= grid_points[2] - 1; k++) {
zeta = (double) (k) * dnzm1;
#pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// bottom face
//---------------------------------------------------------------------
k = 0;
zeta = 0.0;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
#pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// top face
//---------------------------------------------------------------------
k = grid_points[2] - 1;
zeta = 1.0;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
for(j = 0; j <= grid_points[1] - 1; j++) {
eta = (double) (j) * dnym1;
#pragma omp parallel for default(shared) private(i, m, xi) firstprivate(dnxm1, zeta, eta, k, j, grid_points, ce, temp)
for(i = 0; i <= grid_points[0] - 1; i++) {
xi = (double) (i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
u[k][j][i][m] = temp[m];
}
}
}
}
void lhsinit(double lhs[][3][5][5], int size) {
//---------------------------------------------------------------------
// Prepare the boundary entries of a block-tridiagonal left-hand side:
// clear all three 5x5 coefficient blocks at the first (index 0) and
// last (index size) grid points, then set the middle (diagonal) block
// at each end to the identity matrix. Only rows 0 and `size` are
// touched; interior rows are left untouched.
//---------------------------------------------------------------------
int blk, row, col;
// Zero every coefficient of the sub-, main- and super-diagonal blocks
// at the two boundary grid points.
for(row = 0; row < 5; row++) {
for(col = 0; col < 5; col++) {
for(blk = 0; blk < 3; blk++) {
lhs[0][blk][row][col] = 0.0;
lhs[size][blk][row][col] = 0.0;
}
}
}
// A unit diagonal on the center block makes the boundary equations
// trivially solvable. This is overkill, but convenient.
for(row = 0; row < 5; row++) {
lhs[0][1][row][row] = 1.0;
lhs[size][1][row][row] = 1.0;
}
}
void compute_rhs() {
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
//---------------------------------------------------------------------
// Build the right-hand side of the time step, in five stages:
//   1) precompute 1/rho and derived per-point quantities,
//   2) copy the exact forcing term into rhs,
//   3) add xi-, eta- and zeta-direction flux differences, each
//      followed by fourth-order artificial dissipation,
//   4) scale the whole rhs by the time step dt.
// NOTE(review): nested `parallel for` pragmas open nested parallel
// regions, and firstprivate-copying large arrays (u, forcing, ...)
// duplicates them per thread; this tool-generated scheme should be
// confirmed against the target OpenMP runtime.
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// compute the reciprocal of density, and the kinetic energy,
// and the speed of sound.
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, rho_inv) firstprivate(grid_points, u)
for(k = 0; k <= grid_points[2] - 1; k++) {
#pragma omp parallel for default(shared) private(j, i, rho_inv) firstprivate(k, grid_points, u)
for(j = 0; j <= grid_points[1] - 1; j++) {
#pragma omp parallel for default(shared) private(i, rho_inv) firstprivate(k, j, grid_points, u)
for(i = 0; i <= grid_points[0] - 1; i++) {
rho_inv = 1.0 / u[k][j][i][0];
rho_i[k][j][i] = rho_inv;
us[k][j][i] = u[k][j][i][1] * rho_inv;
vs[k][j][i] = u[k][j][i][2] * rho_inv;
ws[k][j][i] = u[k][j][i][3] * rho_inv;
square[k][j][i] = 0.5 * (u[k][j][i][1] * u[k][j][i][1] + u[k][j][i][2] * u[k][j][i][2] + u[k][j][i][3] * u[k][j][i][3]) * rho_inv;
qs[k][j][i] = square[k][j][i] * rho_inv;
}
}
}
//---------------------------------------------------------------------
// copy the exact forcing term to the right hand side; because
// this forcing term is known, we can store it on the whole grid
// including the boundary
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points, forcing)
for(k = 0; k <= grid_points[2] - 1; k++) {
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, grid_points, forcing)
for(j = 0; j <= grid_points[1] - 1; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, grid_points, forcing)
for(i = 0; i <= grid_points[0] - 1; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = forcing[k][j][i][m];
}
}
}
}
//---------------------------------------------------------------------
// compute xi-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, uijk, up1, um1) firstprivate(dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, dssp, grid_points, us, u, square, vs, ws, qs, rho_i)
for(k = 1; k <= grid_points[2] - 2; k++) {
#pragma omp parallel for default(shared) private(j, i, uijk, up1, um1) firstprivate(k, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, grid_points, us, u, square, vs, ws, qs, rho_i)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, uijk, up1, um1) firstprivate(k, j, dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, grid_points, us, u, square, vs, ws, qs, rho_i)
for(i = 1; i <= grid_points[0] - 2; i++) {
// Central differences in i: second differences for the viscous-like
// terms, first differences (times tx2) for the convective terms.
uijk = us[k][j][i];
up1 = us[k][j][i + 1];
um1 = us[k][j][i - 1];
rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 * (u[k][j][i + 1][0] - 2.0 * u[k][j][i][0] + u[k][j][i - 1][0]) - tx2 * (u[k][j][i + 1][1] - u[k][j][i - 1][1]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 * (u[k][j][i + 1][1] - 2.0 * u[k][j][i][1] + u[k][j][i - 1][1]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[k][j][i + 1][1] * up1 - u[k][j][i - 1][1] * um1 + (u[k][j][i + 1][4] - square[k][j][i + 1] - u[k][j][i - 1][4] + square[k][j][i - 1]) * c2);
rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 * (u[k][j][i + 1][2] - 2.0 * u[k][j][i][2] + u[k][j][i - 1][2]) + xxcon2 * (vs[k][j][i + 1] - 2.0 * vs[k][j][i] + vs[k][j][i - 1]) - tx2 * (u[k][j][i + 1][2] * up1 - u[k][j][i - 1][2] * um1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 * (u[k][j][i + 1][3] - 2.0 * u[k][j][i][3] + u[k][j][i - 1][3]) + xxcon2 * (ws[k][j][i + 1] - 2.0 * ws[k][j][i] + ws[k][j][i - 1]) - tx2 * (u[k][j][i + 1][3] * up1 - u[k][j][i - 1][3] * um1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 * (u[k][j][i + 1][4] - 2.0 * u[k][j][i][4] + u[k][j][i - 1][4]) + xxcon3 * (qs[k][j][i + 1] - 2.0 * qs[k][j][i] + qs[k][j][i - 1]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[k][j][i + 1][4] * rho_i[k][j][i + 1] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k][j][i - 1][4] * rho_i[k][j][i - 1]) - tx2 * ((c1 * u[k][j][i + 1][4] - c2 * square[k][j][i + 1]) * up1 - (c1 * u[k][j][i - 1][4] - c2 * square[k][j][i - 1]) * um1);
}
}
//---------------------------------------------------------------------
// add fourth order xi-direction dissipation
//---------------------------------------------------------------------
// Near-boundary points i = 1 and i = 2 use one-sided dissipation
// stencils since the full five-point stencil would reach outside.
#pragma omp parallel for default(shared) private(j, m, i) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
i = 1;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
i = 2;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
}
// Interior points use the full symmetric five-point stencil.
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 3; i <= grid_points[0] - 4; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
}
}
// Mirror-image one-sided stencils at the far boundary.
#pragma omp parallel for default(shared) private(j, m, i) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
i = grid_points[0] - 3;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m]);
}
i = grid_points[0] - 2;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j][i - 2][m] - 4. * u[k][j][i - 1][m] + 5. * u[k][j][i][m]);
}
}
}
//---------------------------------------------------------------------
// compute eta-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, vijk, vp1, vm1) firstprivate(dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, dssp, grid_points, vs, u, us, square, ws, qs, rho_i)
for(k = 1; k <= grid_points[2] - 2; k++) {
#pragma omp parallel for default(shared) private(j, i, vijk, vp1, vm1) firstprivate(k, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, grid_points, vs, u, us, square, ws, qs, rho_i)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, vijk, vp1, vm1) firstprivate(k, j, dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, grid_points, vs, u, us, square, ws, qs, rho_i)
for(i = 1; i <= grid_points[0] - 2; i++) {
vijk = vs[k][j][i];
vp1 = vs[k][j + 1][i];
vm1 = vs[k][j - 1][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 * (u[k][j + 1][i][0] - 2.0 * u[k][j][i][0] + u[k][j - 1][i][0]) - ty2 * (u[k][j + 1][i][2] - u[k][j - 1][i][2]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 * (u[k][j + 1][i][1] - 2.0 * u[k][j][i][1] + u[k][j - 1][i][1]) + yycon2 * (us[k][j + 1][i] - 2.0 * us[k][j][i] + us[k][j - 1][i]) - ty2 * (u[k][j + 1][i][1] * vp1 - u[k][j - 1][i][1] * vm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 * (u[k][j + 1][i][2] - 2.0 * u[k][j][i][2] + u[k][j - 1][i][2]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[k][j + 1][i][2] * vp1 - u[k][j - 1][i][2] * vm1 + (u[k][j + 1][i][4] - square[k][j + 1][i] - u[k][j - 1][i][4] + square[k][j - 1][i]) * c2);
rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 * (u[k][j + 1][i][3] - 2.0 * u[k][j][i][3] + u[k][j - 1][i][3]) + yycon2 * (ws[k][j + 1][i] - 2.0 * ws[k][j][i] + ws[k][j - 1][i]) - ty2 * (u[k][j + 1][i][3] * vp1 - u[k][j - 1][i][3] * vm1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 * (u[k][j + 1][i][4] - 2.0 * u[k][j][i][4] + u[k][j - 1][i][4]) + yycon3 * (qs[k][j + 1][i] - 2.0 * qs[k][j][i] + qs[k][j - 1][i]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[k][j + 1][i][4] * rho_i[k][j + 1][i] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k][j - 1][i][4] * rho_i[k][j - 1][i]) - ty2 * ((c1 * u[k][j + 1][i][4] - c2 * square[k][j + 1][i]) * vp1 - (c1 * u[k][j - 1][i][4] - c2 * square[k][j - 1][i]) * vm1);
}
}
//---------------------------------------------------------------------
// add fourth order eta-direction dissipation
//---------------------------------------------------------------------
// Same boundary treatment as the xi direction, applied along j.
j = 1;
#pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
j = 2;
#pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 3; j <= grid_points[1] - 4; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
}
j = grid_points[1] - 3;
#pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m]);
}
}
j = grid_points[1] - 2;
#pragma omp parallel for default(shared) private(i, m) firstprivate(j, k, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k][j - 2][i][m] - 4. * u[k][j - 1][i][m] + 5. * u[k][j][i][m]);
}
}
}
//---------------------------------------------------------------------
// compute zeta-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, wijk, wp1, wm1) firstprivate(dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, grid_points, ws, u, us, vs, square, qs, rho_i)
for(k = 1; k <= grid_points[2] - 2; k++) {
#pragma omp parallel for default(shared) private(j, i, wijk, wp1, wm1) firstprivate(k, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, grid_points, ws, u, us, vs, square, qs, rho_i)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, wijk, wp1, wm1) firstprivate(k, j, dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, grid_points, ws, u, us, vs, square, qs, rho_i)
for(i = 1; i <= grid_points[0] - 2; i++) {
wijk = ws[k][j][i];
wp1 = ws[k + 1][j][i];
wm1 = ws[k - 1][j][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 * (u[k + 1][j][i][0] - 2.0 * u[k][j][i][0] + u[k - 1][j][i][0]) - tz2 * (u[k + 1][j][i][3] - u[k - 1][j][i][3]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 * (u[k + 1][j][i][1] - 2.0 * u[k][j][i][1] + u[k - 1][j][i][1]) + zzcon2 * (us[k + 1][j][i] - 2.0 * us[k][j][i] + us[k - 1][j][i]) - tz2 * (u[k + 1][j][i][1] * wp1 - u[k - 1][j][i][1] * wm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 * (u[k + 1][j][i][2] - 2.0 * u[k][j][i][2] + u[k - 1][j][i][2]) + zzcon2 * (vs[k + 1][j][i] - 2.0 * vs[k][j][i] + vs[k - 1][j][i]) - tz2 * (u[k + 1][j][i][2] * wp1 - u[k - 1][j][i][2] * wm1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 * (u[k + 1][j][i][3] - 2.0 * u[k][j][i][3] + u[k - 1][j][i][3]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[k + 1][j][i][3] * wp1 - u[k - 1][j][i][3] * wm1 + (u[k + 1][j][i][4] - square[k + 1][j][i] - u[k - 1][j][i][4] + square[k - 1][j][i]) * c2);
rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 * (u[k + 1][j][i][4] - 2.0 * u[k][j][i][4] + u[k - 1][j][i][4]) + zzcon3 * (qs[k + 1][j][i] - 2.0 * qs[k][j][i] + qs[k - 1][j][i]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[k + 1][j][i][4] * rho_i[k + 1][j][i] - 2.0 * u[k][j][i][4] * rho_i[k][j][i] + u[k - 1][j][i][4] * rho_i[k - 1][j][i]) - tz2 * ((c1 * u[k + 1][j][i][4] - c2 * square[k + 1][j][i]) * wp1 - (c1 * u[k - 1][j][i][4] - c2 * square[k - 1][j][i]) * wm1);
}
}
}
//---------------------------------------------------------------------
// add fourth order zeta-direction dissipation
//---------------------------------------------------------------------
// Same boundary treatment as the other directions, applied along k.
k = 1;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (5.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
k = 2;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (-4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(dssp, grid_points, u)
for(k = 3; k <= grid_points[2] - 4; k++) {
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
}
k = grid_points[2] - 3;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m]);
}
}
}
k = grid_points[2] - 2;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dssp, grid_points, u)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp * (u[k - 2][j][i][m] - 4. * u[k - 1][j][i][m] + 5. * u[k][j][i][m]);
}
}
}
//---------------------------------------------------------------------
// finally, scale the entire right hand side by the time step dt
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(dt, grid_points)
for(k = 1; k <= grid_points[2] - 2; k++) {
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dt, grid_points)
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, m) firstprivate(k, j, dt, grid_points)
for(i = 1; i <= grid_points[0] - 2; i++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
rhs[k][j][i][m] = rhs[k][j][i][m] * dt;
}
}
}
}
}
void set_constants() {
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 0.5;
ce[0][7] = 0.02;
ce[0][8] = 0.01;
ce[0][9] = 0.03;
ce[0][10] = 0.5;
ce[0][11] = 0.4;
ce[0][12] = 0.3;
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 0.01;
ce[1][8] = 0.03;
ce[1][9] = 0.02;
ce[1][10] = 0.4;
ce[1][11] = 0.3;
ce[1][12] = 0.5;
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 0.04;
ce[2][8] = 0.03;
ce[2][9] = 0.05;
ce[2][10] = 0.3;
ce[2][11] = 0.5;
ce[2][12] = 0.4;
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 0.03;
ce[3][8] = 0.05;
ce[3][9] = 0.04;
ce[3][10] = 0.2;
ce[3][11] = 0.1;
ce[3][12] = 0.3;
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 0.1;
ce[4][5] = 0.4;
ce[4][6] = 0.3;
ce[4][7] = 0.05;
ce[4][8] = 0.04;
ce[4][9] = 0.03;
ce[4][10] = 0.1;
ce[4][11] = 0.3;
ce[4][12] = 0.2;
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
dnxm1 = 1.0 / (double) (grid_points[0] - 1);
dnym1 = 1.0 / (double) (grid_points[1] - 1);
dnzm1 = 1.0 / (double) (grid_points[2] - 1);
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0 - c1c5);
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = ((dx3) > (dx4) ? (dx3) : (dx4));
dymax = ((dy2) > (dy4) ? (dy2) : (dy4));
dzmax = ((dz2) > (dz3) ? (dz2) : (dz3));
dssp = 0.25 * ((dx1) > (((dy1) > (dz1) ? (dy1) : (dz1))) ? (dx1) : (((dy1) > (dz1) ? (dy1) : (dz1))));
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
dttx1 = dt * tx1;
dttx2 = dt * tx2;
dtty1 = dt * ty1;
dtty2 = dt * ty2;
dttz1 = dt * tz1;
dttz2 = dt * tz2;
c2dttx1 = 2.0 * dttx1;
c2dtty1 = 2.0 * dtty1;
c2dttz1 = 2.0 * dttz1;
dtdssp = dt * dssp;
comz1 = dtdssp;
comz4 = 4.0 * dtdssp;
comz5 = 5.0 * dtdssp;
comz6 = 6.0 * dtdssp;
c3c4tx3 = c3c4 * tx3;
c3c4ty3 = c3c4 * ty3;
c3c4tz3 = c3c4 * tz3;
dx1tx1 = dx1 * tx1;
dx2tx1 = dx2 * tx1;
dx3tx1 = dx3 * tx1;
dx4tx1 = dx4 * tx1;
dx5tx1 = dx5 * tx1;
dy1ty1 = dy1 * ty1;
dy2ty1 = dy2 * ty1;
dy3ty1 = dy3 * ty1;
dy4ty1 = dy4 * ty1;
dy5ty1 = dy5 * ty1;
dz1tz1 = dz1 * tz1;
dz2tz1 = dz2 * tz1;
dz3tz1 = dz3 * tz1;
dz4tz1 = dz4 * tz1;
dz5tz1 = dz5 * tz1;
c2iv = 2.5;
con43 = 4.0 / 3.0;
con16 = 1.0 / 6.0;
xxcon1 = c3c4tx3 * con43 * tx3;
xxcon2 = c3c4tx3 * tx3;
xxcon3 = c3c4tx3 * conz1 * tx3;
xxcon4 = c3c4tx3 * con16 * tx3;
xxcon5 = c3c4tx3 * c1c5 * tx3;
yycon1 = c3c4ty3 * con43 * ty3;
yycon2 = c3c4ty3 * ty3;
yycon3 = c3c4ty3 * conz1 * ty3;
yycon4 = c3c4ty3 * con16 * ty3;
yycon5 = c3c4ty3 * c1c5 * ty3;
zzcon1 = c3c4tz3 * con43 * tz3;
zzcon2 = c3c4tz3 * tz3;
zzcon3 = c3c4tz3 * conz1 * tz3;
zzcon4 = c3c4tz3 * con16 * tz3;
zzcon5 = c3c4tz3 * c1c5 * tz3;
}
//---------------------------------------------------------------------
// subtracts bvec=bvec - ablock*avec
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// bvec := bvec - ablock * avec
//
// The 5x5 block is stored transposed relative to the mathematical
// matrix: ablock[col][row] holds entry (row, col), so row i of the
// product accumulates ablock[j][i] * avec[j] over j.
// The subtractions are applied one term at a time, in the same order
// as the original unrolled code, so the floating-point result is
// bit-identical.
//---------------------------------------------------------------------
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) {
  int row, col;
  for (row = 0; row < 5; row++) {
    for (col = 0; col < 5; col++) {
      bvec[row] = bvec[row] - ablock[col][row] * avec[col];
    }
  }
}
//---------------------------------------------------------------------
// subtracts a(i,j,k) X b(i,j,k) from c(i,j,k)
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// cblock := cblock - ablock * bblock  (5x5 blocks)
//
// Blocks use the transposed storage convention of this file:
// X[col][row] holds entry (row, col).  Each element cblock[j][i]
// therefore subtracts ablock[k][i] * bblock[j][k] summed over k,
// term by term and in the same k order as the original unrolled
// code, keeping the floating-point result bit-identical.
//---------------------------------------------------------------------
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]) {
  int i, j, k;
  for (j = 0; j < 5; j++) {
    for (i = 0; i < 5; i++) {
      for (k = 0; k < 5; k++) {
        cblock[j][i] = cblock[j][i] - ablock[k][i] * bblock[j][k];
      }
    }
  }
}
//---------------------------------------------------------------------
// Gauss-Jordan elimination (no pivot search) on the 5x5 block lhs,
// applying the identical row operations to the 5x5 block c and the
// vector r.  Net effect: c := lhs^{-1} * c and r := lhs^{-1} * r.
//
// Storage is transposed (X[col][row] = entry (row, col)), matching the
// rest of this file.  For each pivot p the pivot row is scaled, then
// the pivot column is eliminated from every other row q in ascending
// order.  The operation sequence applied to every memory location is
// the same as in the original fully-unrolled version, so results are
// bit-identical.  No pivoting: lhs[p][p] must be nonzero.
//---------------------------------------------------------------------
void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {
  double pivot, coeff;
  int p, q, j;
  for (p = 0; p < 5; p++) {
    // Scale pivot row p: columns right of the diagonal in lhs
    // (columns <= p are already reduced), all of c, and r.
    pivot = 1.00 / lhs[p][p];
    for (j = p + 1; j < 5; j++) {
      lhs[j][p] = lhs[j][p] * pivot;
    }
    for (j = 0; j < 5; j++) {
      c[j][p] = c[j][p] * pivot;
    }
    r[p] = r[p] * pivot;
    // Eliminate column p from every other row.
    for (q = 0; q < 5; q++) {
      if (q == p) {
        continue;
      }
      coeff = lhs[p][q];
      for (j = p + 1; j < 5; j++) {
        lhs[j][q] = lhs[j][q] - coeff * lhs[j][p];
      }
      for (j = 0; j < 5; j++) {
        c[j][q] = c[j][q] - coeff * c[j][p];
      }
      r[q] = r[q] - coeff * r[p];
    }
  }
}
//---------------------------------------------------------------------
// Gauss-Jordan elimination (no pivot search) on the 5x5 block lhs,
// applying the identical row operations to the vector r.
// Net effect: r := lhs^{-1} * r.
//
// Same structure as binvcrhs but without the companion c block.
// Storage is transposed (lhs[col][row] = entry (row, col)).  The
// operation sequence per memory location matches the original
// unrolled code exactly, so results are bit-identical.  No pivoting:
// lhs[p][p] must be nonzero.
//---------------------------------------------------------------------
void binvrhs(double lhs[5][5], double r[5]) {
  double pivot, coeff;
  int p, q, j;
  for (p = 0; p < 5; p++) {
    // Scale pivot row p (columns right of the diagonal) and r[p].
    pivot = 1.00 / lhs[p][p];
    for (j = p + 1; j < 5; j++) {
      lhs[j][p] = lhs[j][p] * pivot;
    }
    r[p] = r[p] * pivot;
    // Eliminate column p from every other row.
    for (q = 0; q < 5; q++) {
      if (q == p) {
        continue;
      }
      coeff = lhs[p][q];
      for (j = p + 1; j < 5; j++) {
        lhs[j][q] = lhs[j][q] - coeff * lhs[j][p];
      }
      r[q] = r[q] - coeff * r[p];
    }
  }
}
//---------------------------------------------------------------------
// verification routine
//---------------------------------------------------------------------
void verify(int no_time_steps, char *Class, int *verified) {
// Determines the benchmark class from the grid size and step count,
// compares the RMS norms of the residual (xcr) and of the solution
// error (xce) against class-specific reference values, and reports
// the comparison on stdout.
// Out: *Class    = 'S','W','A','B','C','D','E', or 'U' when the
//                  configuration matches no known class.
//      *verified = 1 when DT and all ten norms match the references
//                  to within epsilon, 0 otherwise.
double xcrref[5];
double xceref[5];
double xcrdif[5];
double xcedif[5];
double epsilon;
double xce[5];
double xcr[5];
double dtref = 0.0;
int m;
//---------------------------------------------------------------------
// tolerance level
//---------------------------------------------------------------------
epsilon = 1.0e-08;
//---------------------------------------------------------------------
// compute the error norm and the residual norm, and exit if not printing
//---------------------------------------------------------------------
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
// Normalize the residual norms by the time step to match the references.
for(m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
*Class = 'U';
*verified = 1;
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
// Defaults of 1.0 keep the relative-difference computation below
// well-defined even when no reference class matches.
for(m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
//---------------------------------------------------------------------
// reference data for 12X12X12 grids after 60 time steps, with DT = 1.0e-02
//---------------------------------------------------------------------
if((grid_points[0] == 12) && (grid_points[1] == 12) && (grid_points[2] == 12) && (no_time_steps == 60)) {
*Class = 'S';
dtref = 1.0e-2;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 1.7034283709541311e-01;
xcrref[1] = 1.2975252070034097e-02;
xcrref[2] = 3.2527926989486055e-02;
xcrref[3] = 2.6436421275166801e-02;
xcrref[4] = 1.9211784131744430e-01;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 4.9976913345811579e-04;
xceref[1] = 4.5195666782961927e-05;
xceref[2] = 7.3973765172921357e-05;
xceref[3] = 7.3821238632439731e-05;
xceref[4] = 8.9269630987491446e-04;
//---------------------------------------------------------------------
// reference data for 24X24X24 grids after 200 time steps,
// with DT = 0.8e-3
//---------------------------------------------------------------------
}
else if((grid_points[0] == 24) && (grid_points[1] == 24) && (grid_points[2] == 24) && (no_time_steps == 200)) {
*Class = 'W';
dtref = 0.8e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.1125590409344e+03;
xcrref[1] = 0.1180007595731e+02;
xcrref[2] = 0.2710329767846e+02;
xcrref[3] = 0.2469174937669e+02;
xcrref[4] = 0.2638427874317e+03;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.4419655736008e+01;
xceref[1] = 0.4638531260002e+00;
xceref[2] = 0.1011551749967e+01;
xceref[3] = 0.9235878729944e+00;
xceref[4] = 0.1018045837718e+02;
//---------------------------------------------------------------------
// reference data for 64X64X64 grids after 200 time steps,
// with DT = 0.8e-3
//---------------------------------------------------------------------
}
else if((grid_points[0] == 64) && (grid_points[1] == 64) && (grid_points[2] == 64) && (no_time_steps == 200)) {
*Class = 'A';
dtref = 0.8e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 1.0806346714637264e+02;
xcrref[1] = 1.1319730901220813e+01;
xcrref[2] = 2.5974354511582465e+01;
xcrref[3] = 2.3665622544678910e+01;
xcrref[4] = 2.5278963211748344e+02;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 4.2348416040525025e+00;
xceref[1] = 4.4390282496995698e-01;
xceref[2] = 9.6692480136345650e-01;
xceref[3] = 8.8302063039765474e-01;
xceref[4] = 9.7379901770829278e+00;
//---------------------------------------------------------------------
// reference data for 102X102X102 grids after 200 time steps,
// with DT = 3.0e-04
//---------------------------------------------------------------------
}
else if((grid_points[0] == 102) && (grid_points[1] == 102) && (grid_points[2] == 102) && (no_time_steps == 200)) {
*Class = 'B';
dtref = 3.0e-4;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 1.4233597229287254e+03;
xcrref[1] = 9.9330522590150238e+01;
xcrref[2] = 3.5646025644535285e+02;
xcrref[3] = 3.2485447959084092e+02;
xcrref[4] = 3.2707541254659363e+03;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 5.2969847140936856e+01;
xceref[1] = 4.4632896115670668e+00;
xceref[2] = 1.3122573342210174e+01;
xceref[3] = 1.2006925323559144e+01;
xceref[4] = 1.2459576151035986e+02;
//---------------------------------------------------------------------
// reference data for 162X162X162 grids after 200 time steps,
// with DT = 1.0e-04
//---------------------------------------------------------------------
}
else if((grid_points[0] == 162) && (grid_points[1] == 162) && (grid_points[2] == 162) && (no_time_steps == 200)) {
*Class = 'C';
dtref = 1.0e-4;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.62398116551764615e+04;
xcrref[1] = 0.50793239190423964e+03;
xcrref[2] = 0.15423530093013596e+04;
xcrref[3] = 0.13302387929291190e+04;
xcrref[4] = 0.11604087428436455e+05;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.16462008369091265e+03;
xceref[1] = 0.11497107903824313e+02;
xceref[2] = 0.41207446207461508e+02;
xceref[3] = 0.37087651059694167e+02;
xceref[4] = 0.36211053051841265e+03;
//---------------------------------------------------------------------
// reference data for 408x408x408 grids after 250 time steps,
// with DT = 0.2e-04
//---------------------------------------------------------------------
}
else if((grid_points[0] == 408) && (grid_points[1] == 408) && (grid_points[2] == 408) && (no_time_steps == 250)) {
*Class = 'D';
dtref = 0.2e-4;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.2533188551738e+05;
xcrref[1] = 0.2346393716980e+04;
xcrref[2] = 0.6294554366904e+04;
xcrref[3] = 0.5352565376030e+04;
xcrref[4] = 0.3905864038618e+05;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.3100009377557e+03;
xceref[1] = 0.2424086324913e+02;
xceref[2] = 0.7782212022645e+02;
xceref[3] = 0.6835623860116e+02;
xceref[4] = 0.6065737200368e+03;
//---------------------------------------------------------------------
// reference data for 1020x1020x1020 grids after 250 time steps,
// with DT = 0.4e-05
//---------------------------------------------------------------------
}
else if((grid_points[0] == 1020) && (grid_points[1] == 1020) && (grid_points[2] == 1020) && (no_time_steps == 250)) {
*Class = 'E';
dtref = 0.4e-5;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.9795372484517e+05;
xcrref[1] = 0.9739814511521e+04;
xcrref[2] = 0.2467606342965e+05;
xcrref[3] = 0.2092419572860e+05;
xcrref[4] = 0.1392138856939e+06;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.4327562208414e+03;
xceref[1] = 0.3699051964887e+02;
xceref[2] = 0.1089845040954e+03;
xceref[3] = 0.9462517622043e+02;
xceref[4] = 0.7765512765309e+03;
}
else {
// Unknown configuration: no references available, nothing to verify.
*verified = 0;
}
//---------------------------------------------------------------------
// verification test for residuals if gridsize is one of
// the defined grid sizes above (*Class != 'U')
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Compute the difference of solution values and the known reference values.
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
// Relative differences; references default to 1.0 for the 'U' class,
// so this is safe even when no class matched.
for(m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]);
xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]);
}
//---------------------------------------------------------------------
// Output the comparison of computed results to known cases.
//---------------------------------------------------------------------
if(*Class != 'U') {
printf(" Verification being performed for class %c\n", *Class);
printf(" accuracy setting for epsilon = %20.13E\n", epsilon);
// The run is only comparable to the references if it used the
// reference time step for this class.
*verified = (fabs(dt - dtref) <= epsilon);
if(!(*verified)) {
*Class = 'U';
printf(" DT does not match the reference value of %15.8E\n", dtref);
}
}
else {
printf(" Unknown class\n");
}
if(*Class != 'U') {
printf(" Comparison of RMS-norms of residual\n");
}
else {
printf(" RMS-norms of residual\n");
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
if(*Class == 'U') {
printf(" %2d%20.13E\n", m + 1, xcr[m]);
}
else if(xcrdif[m] <= epsilon) {
printf(" %2d%20.13E%20.13E%20.13E\n", m + 1, xcr[m], xcrref[m], xcrdif[m]);
}
else {
*verified = 0;
printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n", m + 1, xcr[m], xcrref[m], xcrdif[m]);
}
}
if(*Class != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
}
else {
printf(" RMS-norms of solution error\n");
}
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
if(*Class == 'U') {
printf(" %2d%20.13E\n", m + 1, xce[m]);
}
else if(xcedif[m] <= epsilon) {
printf(" %2d%20.13E%20.13E%20.13E\n", m + 1, xce[m], xceref[m], xcedif[m]);
}
else {
*verified = 0;
printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n", m + 1, xce[m], xceref[m], xcedif[m]);
}
}
if(*Class == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
}
else if(*verified) {
printf(" Verification Successful\n");
}
else {
printf(" Verification failed\n");
}
}
//---------------------------------------------------------------------
//
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknow
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//
//---------------------------------------------------------------------
void x_solve() {
int i, j, k, m, n, isize;
double fjac[25][5][5];
double njac[25][5][5];
double lhs[25][3][5][5];
double tmp1, tmp2, tmp3;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side in the xi-direction
//---------------------------------------------------------------------
isize = grid_points[0] - 1;
//---------------------------------------------------------------------
// determine a (labeled f) and n jacobians
//---------------------------------------------------------------------
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(k = 1; k <= grid_points[2] - 2; k++) {
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(j = 1; j <= grid_points[1] - 2; j++) {
#pragma omp parallel for default(shared) private(i, tmp1, tmp2, tmp3) firstprivate(isize, k, j, c2, c1, con43, c3c4, c1345, rho_i, u, qs, square)
for(i = 0; i <= isize; i++) {
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
//-------------------------------------------------------------------
//
//-------------------------------------------------------------------
fjac[i][0][0] = 0.0;
fjac[i][1][0] = 1.0;
fjac[i][2][0] = 0.0;
fjac[i][3][0] = 0.0;
fjac[i][4][0] = 0.0;
fjac[i][0][1] = -(u[k][j][i][1] * tmp2 * u[k][j][i][1]) + c2 * qs[k][j][i];
fjac[i][1][1] = (2.0 - c2) * (u[k][j][i][1] / u[k][j][i][0]);
fjac[i][2][1] = -c2 * (u[k][j][i][2] * tmp1);
fjac[i][3][1] = -c2 * (u[k][j][i][3] * tmp1);
fjac[i][4][1] = c2;
fjac[i][0][2] = -(u[k][j][i][1] * u[k][j][i][2]) * tmp2;
fjac[i][1][2] = u[k][j][i][2] * tmp1;
fjac[i][2][2] = u[k][j][i][1] * tmp1;
fjac[i][3][2] = 0.0;
fjac[i][4][2] = 0.0;
fjac[i][0][3] = -(u[k][j][i][1] * u[k][j][i][3]) * tmp2;
fjac[i][1][3] = u[k][j][i][3] * tmp1;
fjac[i][2][3] = 0.0;
fjac[i][3][3] = u[k][j][i][1] * tmp1;
fjac[i][4][3] = 0.0;
fjac[i][0][4] = (c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4]) * (u[k][j][i][1] * tmp2);
fjac[i][1][4] = c1 * u[k][j][i][4] * tmp1 - c2 * (u[k][j][i][1] * u[k][j][i][1] * tmp2 + qs[k][j][i]);
fjac[i][2][4] = -c2 * (u[k][j][i][2] * u[k][j][i][1]) * tmp2;
fjac[i][3][4] = -c2 * (u[k][j][i][3] * u[k][j][i][1]) * tmp2;
fjac[i][4][4] = c1 * (u[k][j][i][1] * tmp1);
njac[i][0][0] = 0.0;
njac[i][1][0] = 0.0;
njac[i][2][0] = 0.0;
njac[i][3][0] = 0.0;
njac[i][4][0] = 0.0;
njac[i][0][1] = -con43 * c3c4 * tmp2 * u[k][j][i][1];
njac[i][1][1] = con43 * c3c4 * tmp1;
njac[i][2][1] = 0.0;
njac[i][3][1] = 0.0;
njac[i][4][1] = 0.0;
njac[i][0][2] = -c3c4 * tmp2 * u[k][j][i][2];
njac[i][1][2] = 0.0;
njac[i][2][2] = c3c4 * tmp1;
njac[i][3][2] = 0.0;
njac[i][4][2] = 0.0;
njac[i][0][3] = -c3c4 * tmp2 * u[k][j][i][3];
njac[i][1][3] = 0.0;
njac[i][2][3] = 0.0;
njac[i][3][3] = c3c4 * tmp1;
njac[i][4][3] = 0.0;
njac[i][0][4] = -(con43 * c3c4 - c1345) * tmp3 * (u[k][j][i][1] * u[k][j][i][1]) - (c3c4 - c1345) * tmp3 * (u[k][j][i][2] * u[k][j][i][2]) - (c3c4 - c1345) * tmp3 * (u[k][j][i][3] * u[k][j][i][3]) - c1345 * tmp2 * u[k][j][i][4];
njac[i][1][4] = (con43 * c3c4 - c1345) * tmp2 * u[k][j][i][1];
njac[i][2][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][2];
njac[i][3][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][3];
njac[i][4][4] = (c1345) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in x direction
//---------------------------------------------------------------------
lhsinit(lhs, isize);
#pragma omp parallel for default(shared) private(i, tmp1, tmp2) firstprivate(isize, dt, tx1, tx2, dx1, dx2, dx3, dx4, dx5, fjac, njac)
for(i = 1; i <= isize - 1; i++) {
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][0][0][0] = -tmp2 * fjac[i - 1][0][0] - tmp1 * njac[i - 1][0][0] - tmp1 * dx1;
lhs[i][0][1][0] = -tmp2 * fjac[i - 1][1][0] - tmp1 * njac[i - 1][1][0];
lhs[i][0][2][0] = -tmp2 * fjac[i - 1][2][0] - tmp1 * njac[i - 1][2][0];
lhs[i][0][3][0] = -tmp2 * fjac[i - 1][3][0] - tmp1 * njac[i - 1][3][0];
lhs[i][0][4][0] = -tmp2 * fjac[i - 1][4][0] - tmp1 * njac[i - 1][4][0];
lhs[i][0][0][1] = -tmp2 * fjac[i - 1][0][1] - tmp1 * njac[i - 1][0][1];
lhs[i][0][1][1] = -tmp2 * fjac[i - 1][1][1] - tmp1 * njac[i - 1][1][1] - tmp1 * dx2;
lhs[i][0][2][1] = -tmp2 * fjac[i - 1][2][1] - tmp1 * njac[i - 1][2][1];
lhs[i][0][3][1] = -tmp2 * fjac[i - 1][3][1] - tmp1 * njac[i - 1][3][1];
lhs[i][0][4][1] = -tmp2 * fjac[i - 1][4][1] - tmp1 * njac[i - 1][4][1];
lhs[i][0][0][2] = -tmp2 * fjac[i - 1][0][2] - tmp1 * njac[i - 1][0][2];
lhs[i][0][1][2] = -tmp2 * fjac[i - 1][1][2] - tmp1 * njac[i - 1][1][2];
lhs[i][0][2][2] = -tmp2 * fjac[i - 1][2][2] - tmp1 * njac[i - 1][2][2] - tmp1 * dx3;
lhs[i][0][3][2] = -tmp2 * fjac[i - 1][3][2] - tmp1 * njac[i - 1][3][2];
lhs[i][0][4][2] = -tmp2 * fjac[i - 1][4][2] - tmp1 * njac[i - 1][4][2];
lhs[i][0][0][3] = -tmp2 * fjac[i - 1][0][3] - tmp1 * njac[i - 1][0][3];
lhs[i][0][1][3] = -tmp2 * fjac[i - 1][1][3] - tmp1 * njac[i - 1][1][3];
lhs[i][0][2][3] = -tmp2 * fjac[i - 1][2][3] - tmp1 * njac[i - 1][2][3];
lhs[i][0][3][3] = -tmp2 * fjac[i - 1][3][3] - tmp1 * njac[i - 1][3][3] - tmp1 * dx4;
lhs[i][0][4][3] = -tmp2 * fjac[i - 1][4][3] - tmp1 * njac[i - 1][4][3];
lhs[i][0][0][4] = -tmp2 * fjac[i - 1][0][4] - tmp1 * njac[i - 1][0][4];
lhs[i][0][1][4] = -tmp2 * fjac[i - 1][1][4] - tmp1 * njac[i - 1][1][4];
lhs[i][0][2][4] = -tmp2 * fjac[i - 1][2][4] - tmp1 * njac[i - 1][2][4];
lhs[i][0][3][4] = -tmp2 * fjac[i - 1][3][4] - tmp1 * njac[i - 1][3][4];
lhs[i][0][4][4] = -tmp2 * fjac[i - 1][4][4] - tmp1 * njac[i - 1][4][4] - tmp1 * dx5;
lhs[i][1][0][0] = 1.0 + tmp1 * 2.0 * njac[i][0][0] + tmp1 * 2.0 * dx1;
lhs[i][1][1][0] = tmp1 * 2.0 * njac[i][1][0];
lhs[i][1][2][0] = tmp1 * 2.0 * njac[i][2][0];
lhs[i][1][3][0] = tmp1 * 2.0 * njac[i][3][0];
lhs[i][1][4][0] = tmp1 * 2.0 * njac[i][4][0];
lhs[i][1][0][1] = tmp1 * 2.0 * njac[i][0][1];
lhs[i][1][1][1] = 1.0 + tmp1 * 2.0 * njac[i][1][1] + tmp1 * 2.0 * dx2;
lhs[i][1][2][1] = tmp1 * 2.0 * njac[i][2][1];
lhs[i][1][3][1] = tmp1 * 2.0 * njac[i][3][1];
lhs[i][1][4][1] = tmp1 * 2.0 * njac[i][4][1];
lhs[i][1][0][2] = tmp1 * 2.0 * njac[i][0][2];
lhs[i][1][1][2] = tmp1 * 2.0 * njac[i][1][2];
lhs[i][1][2][2] = 1.0 + tmp1 * 2.0 * njac[i][2][2] + tmp1 * 2.0 * dx3;
lhs[i][1][3][2] = tmp1 * 2.0 * njac[i][3][2];
lhs[i][1][4][2] = tmp1 * 2.0 * njac[i][4][2];
lhs[i][1][0][3] = tmp1 * 2.0 * njac[i][0][3];
lhs[i][1][1][3] = tmp1 * 2.0 * njac[i][1][3];
lhs[i][1][2][3] = tmp1 * 2.0 * njac[i][2][3];
lhs[i][1][3][3] = 1.0 + tmp1 * 2.0 * njac[i][3][3] + tmp1 * 2.0 * dx4;
lhs[i][1][4][3] = tmp1 * 2.0 * njac[i][4][3];
lhs[i][1][0][4] = tmp1 * 2.0 * njac[i][0][4];
lhs[i][1][1][4] = tmp1 * 2.0 * njac[i][1][4];
lhs[i][1][2][4] = tmp1 * 2.0 * njac[i][2][4];
lhs[i][1][3][4] = tmp1 * 2.0 * njac[i][3][4];
lhs[i][1][4][4] = 1.0 + tmp1 * 2.0 * njac[i][4][4] + tmp1 * 2.0 * dx5;
lhs[i][2][0][0] = tmp2 * fjac[i + 1][0][0] - tmp1 * njac[i + 1][0][0] - tmp1 * dx1;
lhs[i][2][1][0] = tmp2 * fjac[i + 1][1][0] - tmp1 * njac[i + 1][1][0];
lhs[i][2][2][0] = tmp2 * fjac[i + 1][2][0] - tmp1 * njac[i + 1][2][0];
lhs[i][2][3][0] = tmp2 * fjac[i + 1][3][0] - tmp1 * njac[i + 1][3][0];
lhs[i][2][4][0] = tmp2 * fjac[i + 1][4][0] - tmp1 * njac[i + 1][4][0];
lhs[i][2][0][1] = tmp2 * fjac[i + 1][0][1] - tmp1 * njac[i + 1][0][1];
lhs[i][2][1][1] = tmp2 * fjac[i + 1][1][1] - tmp1 * njac[i + 1][1][1] - tmp1 * dx2;
lhs[i][2][2][1] = tmp2 * fjac[i + 1][2][1] - tmp1 * njac[i + 1][2][1];
lhs[i][2][3][1] = tmp2 * fjac[i + 1][3][1] - tmp1 * njac[i + 1][3][1];
lhs[i][2][4][1] = tmp2 * fjac[i + 1][4][1] - tmp1 * njac[i + 1][4][1];
lhs[i][2][0][2] = tmp2 * fjac[i + 1][0][2] - tmp1 * njac[i + 1][0][2];
lhs[i][2][1][2] = tmp2 * fjac[i + 1][1][2] - tmp1 * njac[i + 1][1][2];
lhs[i][2][2][2] = tmp2 * fjac[i + 1][2][2] - tmp1 * njac[i + 1][2][2] - tmp1 * dx3;
lhs[i][2][3][2] = tmp2 * fjac[i + 1][3][2] - tmp1 * njac[i + 1][3][2];
lhs[i][2][4][2] = tmp2 * fjac[i + 1][4][2] - tmp1 * njac[i + 1][4][2];
lhs[i][2][0][3] = tmp2 * fjac[i + 1][0][3] - tmp1 * njac[i + 1][0][3];
lhs[i][2][1][3] = tmp2 * fjac[i + 1][1][3] - tmp1 * njac[i + 1][1][3];
lhs[i][2][2][3] = tmp2 * fjac[i + 1][2][3] - tmp1 * njac[i + 1][2][3];
lhs[i][2][3][3] = tmp2 * fjac[i + 1][3][3] - tmp1 * njac[i + 1][3][3] - tmp1 * dx4;
lhs[i][2][4][3] = tmp2 * fjac[i + 1][4][3] - tmp1 * njac[i + 1][4][3];
lhs[i][2][0][4] = tmp2 * fjac[i + 1][0][4] - tmp1 * njac[i + 1][0][4];
lhs[i][2][1][4] = tmp2 * fjac[i + 1][1][4] - tmp1 * njac[i + 1][1][4];
lhs[i][2][2][4] = tmp2 * fjac[i + 1][2][4] - tmp1 * njac[i + 1][2][4];
lhs[i][2][3][4] = tmp2 * fjac[i + 1][3][4] - tmp1 * njac[i + 1][3][4];
lhs[i][2][4][4] = tmp2 * fjac[i + 1][4][4] - tmp1 * njac[i + 1][4][4] - tmp1 * dx5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs guaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(IMAX) and rhs'(IMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][j][0] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs(lhs[0][1], lhs[0][2], rhs[k][j][0]);
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RW
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(i = 1; i <= isize - 1; i++) {
//-------------------------------------------------------------------
// rhs(i) = rhs(i) - A*rhs(i-1)
//-------------------------------------------------------------------
matvec_sub(lhs[i][0], rhs[k][j][i - 1], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(i) = B(i) - C(i-1)*A(i)
//-------------------------------------------------------------------
matmul_sub(lhs[i][0], lhs[i - 1][2], lhs[i][1]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][j][0] by b_inverse[k][j][0] and copy to rhs
//-------------------------------------------------------------------
binvcrhs(lhs[i][1], lhs[i][2], rhs[k][j][i]);
}
//---------------------------------------------------------------------
// rhs(isize) = rhs(isize) - A*rhs(isize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[isize][0], rhs[k][j][isize - 1], rhs[k][j][isize]);
//---------------------------------------------------------------------
// B(isize) = B(isize) - C(isize-1)*A(isize)
//---------------------------------------------------------------------
matmul_sub(lhs[isize][0], lhs[isize - 1][2], lhs[isize][1]);
//---------------------------------------------------------------------
// multiply rhs() by b_inverse() and copy to rhs
//---------------------------------------------------------------------
binvrhs(lhs[isize][1], rhs[k][j][isize]);
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(isize)=rhs(isize)
// else assume U(isize) is loaded in un pack backsub_info
// so just use it
// after u(istart) will be sent to next cell
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(i = isize - 1; i >= 0; i--) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(n = 0; n < 5; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[i][2][n][m] * rhs[k][j][i + 1][n];
}
}
}
}
}
}
//---------------------------------------------------------------------
// Performs line solves in Y direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void y_solve() {
// Loop indices; jsize is the last grid index in the y direction.
int i, j, k, m, n, jsize;
// fjac: convective (flux) Jacobian dG/dU at each point of the current line.
// njac: viscous Jacobian at each point of the current line.
// NOTE(review): the leading dimension 25 must be >= grid_points[1]; this
// bound is established elsewhere in the file -- confirm against the class
// size settings.
double fjac[25][5][5];
double njac[25][5][5];
// lhs[j][0..2] holds the three 5x5 blocks of the block-tridiagonal system
// at point j: [0] = A (sub-diagonal), [1] = B (diagonal), [2] = C (super-
// diagonal).
double lhs[25][3][5][5];
// tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 at the current point.
double tmp1, tmp2, tmp3;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side for the three y-factors
//---------------------------------------------------------------------
jsize = grid_points[1] - 1;
//---------------------------------------------------------------------
// Compute the indices for storing the tri-diagonal matrix;
// determine a (labeled f) and n jacobians for cell c
//---------------------------------------------------------------------
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
// Sweep every interior (k,i) line; each (k,i) pair is an independent
// tridiagonal solve along j.
for(k = 1; k <= grid_points[2] - 2; k++) {
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(i = 1; i <= grid_points[0] - 2; i++) {
// Each iteration writes only fjac[j]/njac[j], so the loop is safely
// parallel. fjac[m][n] is indexed [point][column][row].
#pragma omp parallel for default(shared) private(j, tmp1, tmp2, tmp3) firstprivate(jsize, k, i, c2, c1, c3c4, con43, c1345, rho_i, u, qs, square)
for(j = 0; j <= jsize; j++) {
// rho_i holds precomputed 1/rho (set elsewhere in the file).
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
// Convective Jacobian of the y-direction flux with respect to the
// conserved variables u = (rho, rho*u, rho*v, rho*w, E).
fjac[j][0][0] = 0.0;
fjac[j][1][0] = 0.0;
fjac[j][2][0] = 1.0;
fjac[j][3][0] = 0.0;
fjac[j][4][0] = 0.0;
fjac[j][0][1] = -(u[k][j][i][1] * u[k][j][i][2]) * tmp2;
fjac[j][1][1] = u[k][j][i][2] * tmp1;
fjac[j][2][1] = u[k][j][i][1] * tmp1;
fjac[j][3][1] = 0.0;
fjac[j][4][1] = 0.0;
fjac[j][0][2] = -(u[k][j][i][2] * u[k][j][i][2] * tmp2) + c2 * qs[k][j][i];
fjac[j][1][2] = -c2 * u[k][j][i][1] * tmp1;
fjac[j][2][2] = (2.0 - c2) * u[k][j][i][2] * tmp1;
fjac[j][3][2] = -c2 * u[k][j][i][3] * tmp1;
fjac[j][4][2] = c2;
fjac[j][0][3] = -(u[k][j][i][2] * u[k][j][i][3]) * tmp2;
fjac[j][1][3] = 0.0;
fjac[j][2][3] = u[k][j][i][3] * tmp1;
fjac[j][3][3] = u[k][j][i][2] * tmp1;
fjac[j][4][3] = 0.0;
fjac[j][0][4] = (c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4]) * u[k][j][i][2] * tmp2;
fjac[j][1][4] = -c2 * u[k][j][i][1] * u[k][j][i][2] * tmp2;
fjac[j][2][4] = c1 * u[k][j][i][4] * tmp1 - c2 * (qs[k][j][i] + u[k][j][i][2] * u[k][j][i][2] * tmp2);
fjac[j][3][4] = -c2 * (u[k][j][i][2] * u[k][j][i][3]) * tmp2;
fjac[j][4][4] = c1 * u[k][j][i][2] * tmp1;
// Viscous Jacobian; the y-momentum row carries the extra con43
// (4/3) factor from the normal-stress term.
njac[j][0][0] = 0.0;
njac[j][1][0] = 0.0;
njac[j][2][0] = 0.0;
njac[j][3][0] = 0.0;
njac[j][4][0] = 0.0;
njac[j][0][1] = -c3c4 * tmp2 * u[k][j][i][1];
njac[j][1][1] = c3c4 * tmp1;
njac[j][2][1] = 0.0;
njac[j][3][1] = 0.0;
njac[j][4][1] = 0.0;
njac[j][0][2] = -con43 * c3c4 * tmp2 * u[k][j][i][2];
njac[j][1][2] = 0.0;
njac[j][2][2] = con43 * c3c4 * tmp1;
njac[j][3][2] = 0.0;
njac[j][4][2] = 0.0;
njac[j][0][3] = -c3c4 * tmp2 * u[k][j][i][3];
njac[j][1][3] = 0.0;
njac[j][2][3] = 0.0;
njac[j][3][3] = c3c4 * tmp1;
njac[j][4][3] = 0.0;
njac[j][0][4] = -(c3c4 - c1345) * tmp3 * (u[k][j][i][1] * u[k][j][i][1]) - (con43 * c3c4 - c1345) * tmp3 * (u[k][j][i][2] * u[k][j][i][2]) - (c3c4 - c1345) * tmp3 * (u[k][j][i][3] * u[k][j][i][3]) - c1345 * tmp2 * u[k][j][i][4];
njac[j][1][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][1];
njac[j][2][4] = (con43 * c3c4 - c1345) * tmp2 * u[k][j][i][2];
njac[j][3][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][3];
njac[j][4][4] = (c1345) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in y direction
//---------------------------------------------------------------------
// lhsinit (defined elsewhere) sets the boundary blocks: identity on the
// diagonal and zero off-diagonals at j = 0 and j = jsize.
lhsinit(lhs, jsize);
// Each iteration writes only lhs[j]; fjac/njac are read-only here
// (firstprivate gives each thread its own copy), so the loop is parallel.
#pragma omp parallel for default(shared) private(j, tmp1, tmp2) firstprivate(jsize, dt, ty1, ty2, dy1, dy2, dy3, dy4, dy5, fjac, njac)
for(j = 1; j <= jsize - 1; j++) {
// tmp1/tmp2 are reused here as the dt-scaled viscous/convective
// coefficients (not 1/rho powers as in the previous loop).
tmp1 = dt * ty1;
tmp2 = dt * ty2;
// A block (couples to point j-1): -dt*ty2*G'(j-1) - dt*ty1*N'(j-1),
// with the artificial-dissipation terms dy1..dy5 on the diagonal.
lhs[j][0][0][0] = -tmp2 * fjac[j - 1][0][0] - tmp1 * njac[j - 1][0][0] - tmp1 * dy1;
lhs[j][0][1][0] = -tmp2 * fjac[j - 1][1][0] - tmp1 * njac[j - 1][1][0];
lhs[j][0][2][0] = -tmp2 * fjac[j - 1][2][0] - tmp1 * njac[j - 1][2][0];
lhs[j][0][3][0] = -tmp2 * fjac[j - 1][3][0] - tmp1 * njac[j - 1][3][0];
lhs[j][0][4][0] = -tmp2 * fjac[j - 1][4][0] - tmp1 * njac[j - 1][4][0];
lhs[j][0][0][1] = -tmp2 * fjac[j - 1][0][1] - tmp1 * njac[j - 1][0][1];
lhs[j][0][1][1] = -tmp2 * fjac[j - 1][1][1] - tmp1 * njac[j - 1][1][1] - tmp1 * dy2;
lhs[j][0][2][1] = -tmp2 * fjac[j - 1][2][1] - tmp1 * njac[j - 1][2][1];
lhs[j][0][3][1] = -tmp2 * fjac[j - 1][3][1] - tmp1 * njac[j - 1][3][1];
lhs[j][0][4][1] = -tmp2 * fjac[j - 1][4][1] - tmp1 * njac[j - 1][4][1];
lhs[j][0][0][2] = -tmp2 * fjac[j - 1][0][2] - tmp1 * njac[j - 1][0][2];
lhs[j][0][1][2] = -tmp2 * fjac[j - 1][1][2] - tmp1 * njac[j - 1][1][2];
lhs[j][0][2][2] = -tmp2 * fjac[j - 1][2][2] - tmp1 * njac[j - 1][2][2] - tmp1 * dy3;
lhs[j][0][3][2] = -tmp2 * fjac[j - 1][3][2] - tmp1 * njac[j - 1][3][2];
lhs[j][0][4][2] = -tmp2 * fjac[j - 1][4][2] - tmp1 * njac[j - 1][4][2];
lhs[j][0][0][3] = -tmp2 * fjac[j - 1][0][3] - tmp1 * njac[j - 1][0][3];
lhs[j][0][1][3] = -tmp2 * fjac[j - 1][1][3] - tmp1 * njac[j - 1][1][3];
lhs[j][0][2][3] = -tmp2 * fjac[j - 1][2][3] - tmp1 * njac[j - 1][2][3];
lhs[j][0][3][3] = -tmp2 * fjac[j - 1][3][3] - tmp1 * njac[j - 1][3][3] - tmp1 * dy4;
lhs[j][0][4][3] = -tmp2 * fjac[j - 1][4][3] - tmp1 * njac[j - 1][4][3];
lhs[j][0][0][4] = -tmp2 * fjac[j - 1][0][4] - tmp1 * njac[j - 1][0][4];
lhs[j][0][1][4] = -tmp2 * fjac[j - 1][1][4] - tmp1 * njac[j - 1][1][4];
lhs[j][0][2][4] = -tmp2 * fjac[j - 1][2][4] - tmp1 * njac[j - 1][2][4];
lhs[j][0][3][4] = -tmp2 * fjac[j - 1][3][4] - tmp1 * njac[j - 1][3][4];
lhs[j][0][4][4] = -tmp2 * fjac[j - 1][4][4] - tmp1 * njac[j - 1][4][4] - tmp1 * dy5;
// B block (diagonal, point j): I + 2*dt*ty1*N'(j) + 2*dt*ty1*dy*.
// The convective Jacobian cancels at the central point.
lhs[j][1][0][0] = 1.0 + tmp1 * 2.0 * njac[j][0][0] + tmp1 * 2.0 * dy1;
lhs[j][1][1][0] = tmp1 * 2.0 * njac[j][1][0];
lhs[j][1][2][0] = tmp1 * 2.0 * njac[j][2][0];
lhs[j][1][3][0] = tmp1 * 2.0 * njac[j][3][0];
lhs[j][1][4][0] = tmp1 * 2.0 * njac[j][4][0];
lhs[j][1][0][1] = tmp1 * 2.0 * njac[j][0][1];
lhs[j][1][1][1] = 1.0 + tmp1 * 2.0 * njac[j][1][1] + tmp1 * 2.0 * dy2;
lhs[j][1][2][1] = tmp1 * 2.0 * njac[j][2][1];
lhs[j][1][3][1] = tmp1 * 2.0 * njac[j][3][1];
lhs[j][1][4][1] = tmp1 * 2.0 * njac[j][4][1];
lhs[j][1][0][2] = tmp1 * 2.0 * njac[j][0][2];
lhs[j][1][1][2] = tmp1 * 2.0 * njac[j][1][2];
lhs[j][1][2][2] = 1.0 + tmp1 * 2.0 * njac[j][2][2] + tmp1 * 2.0 * dy3;
lhs[j][1][3][2] = tmp1 * 2.0 * njac[j][3][2];
lhs[j][1][4][2] = tmp1 * 2.0 * njac[j][4][2];
lhs[j][1][0][3] = tmp1 * 2.0 * njac[j][0][3];
lhs[j][1][1][3] = tmp1 * 2.0 * njac[j][1][3];
lhs[j][1][2][3] = tmp1 * 2.0 * njac[j][2][3];
lhs[j][1][3][3] = 1.0 + tmp1 * 2.0 * njac[j][3][3] + tmp1 * 2.0 * dy4;
lhs[j][1][4][3] = tmp1 * 2.0 * njac[j][4][3];
lhs[j][1][0][4] = tmp1 * 2.0 * njac[j][0][4];
lhs[j][1][1][4] = tmp1 * 2.0 * njac[j][1][4];
lhs[j][1][2][4] = tmp1 * 2.0 * njac[j][2][4];
lhs[j][1][3][4] = tmp1 * 2.0 * njac[j][3][4];
lhs[j][1][4][4] = 1.0 + tmp1 * 2.0 * njac[j][4][4] + tmp1 * 2.0 * dy5;
// C block (couples to point j+1): +dt*ty2*G'(j+1) - dt*ty1*N'(j+1),
// again with dissipation on the diagonal.
lhs[j][2][0][0] = tmp2 * fjac[j + 1][0][0] - tmp1 * njac[j + 1][0][0] - tmp1 * dy1;
lhs[j][2][1][0] = tmp2 * fjac[j + 1][1][0] - tmp1 * njac[j + 1][1][0];
lhs[j][2][2][0] = tmp2 * fjac[j + 1][2][0] - tmp1 * njac[j + 1][2][0];
lhs[j][2][3][0] = tmp2 * fjac[j + 1][3][0] - tmp1 * njac[j + 1][3][0];
lhs[j][2][4][0] = tmp2 * fjac[j + 1][4][0] - tmp1 * njac[j + 1][4][0];
lhs[j][2][0][1] = tmp2 * fjac[j + 1][0][1] - tmp1 * njac[j + 1][0][1];
lhs[j][2][1][1] = tmp2 * fjac[j + 1][1][1] - tmp1 * njac[j + 1][1][1] - tmp1 * dy2;
lhs[j][2][2][1] = tmp2 * fjac[j + 1][2][1] - tmp1 * njac[j + 1][2][1];
lhs[j][2][3][1] = tmp2 * fjac[j + 1][3][1] - tmp1 * njac[j + 1][3][1];
lhs[j][2][4][1] = tmp2 * fjac[j + 1][4][1] - tmp1 * njac[j + 1][4][1];
lhs[j][2][0][2] = tmp2 * fjac[j + 1][0][2] - tmp1 * njac[j + 1][0][2];
lhs[j][2][1][2] = tmp2 * fjac[j + 1][1][2] - tmp1 * njac[j + 1][1][2];
lhs[j][2][2][2] = tmp2 * fjac[j + 1][2][2] - tmp1 * njac[j + 1][2][2] - tmp1 * dy3;
lhs[j][2][3][2] = tmp2 * fjac[j + 1][3][2] - tmp1 * njac[j + 1][3][2];
lhs[j][2][4][2] = tmp2 * fjac[j + 1][4][2] - tmp1 * njac[j + 1][4][2];
lhs[j][2][0][3] = tmp2 * fjac[j + 1][0][3] - tmp1 * njac[j + 1][0][3];
lhs[j][2][1][3] = tmp2 * fjac[j + 1][1][3] - tmp1 * njac[j + 1][1][3];
lhs[j][2][2][3] = tmp2 * fjac[j + 1][2][3] - tmp1 * njac[j + 1][2][3];
lhs[j][2][3][3] = tmp2 * fjac[j + 1][3][3] - tmp1 * njac[j + 1][3][3] - tmp1 * dy4;
lhs[j][2][4][3] = tmp2 * fjac[j + 1][4][3] - tmp1 * njac[j + 1][4][3];
lhs[j][2][0][4] = tmp2 * fjac[j + 1][0][4] - tmp1 * njac[j + 1][0][4];
lhs[j][2][1][4] = tmp2 * fjac[j + 1][1][4] - tmp1 * njac[j + 1][1][4];
lhs[j][2][2][4] = tmp2 * fjac[j + 1][2][4] - tmp1 * njac[j + 1][2][4];
lhs[j][2][3][4] = tmp2 * fjac[j + 1][3][4] - tmp1 * njac[j + 1][3][4];
lhs[j][2][4][4] = tmp2 * fjac[j + 1][4][4] - tmp1 * njac[j + 1][4][4] - tmp1 * dy5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs Gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(JMAX) and rhs'(JMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][0][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs(lhs[0][1], lhs[0][2], rhs[k][0][i]);
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RW
unsolved dependency for arrayAccess rhs use : RW
****************************************/
// Forward elimination: inherently sequential in j (each step reads the
// j-1 results), so it must not be parallelized.
for(j = 1; j <= jsize - 1; j++) {
//-------------------------------------------------------------------
// subtract A*lhs_vector(j-1) from lhs_vector(j)
//
// rhs(j) = rhs(j) - A*rhs(j-1)
//-------------------------------------------------------------------
matvec_sub(lhs[j][0], rhs[k][j - 1][i], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(j) = B(j) - C(j-1)*A(j)
//-------------------------------------------------------------------
matmul_sub(lhs[j][0], lhs[j - 1][2], lhs[j][1]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[k][j][i] by b_inverse and copy to rhs
//-------------------------------------------------------------------
binvcrhs(lhs[j][1], lhs[j][2], rhs[k][j][i]);
}
//---------------------------------------------------------------------
// rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[jsize][0], rhs[k][jsize - 1][i], rhs[k][jsize][i]);
//---------------------------------------------------------------------
// B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
// matmul_sub(AA,i,jsize,k,c,
// $ CC,i,jsize-1,k,c,BB,i,jsize,k)
//---------------------------------------------------------------------
matmul_sub(lhs[jsize][0], lhs[jsize - 1][2], lhs[jsize][1]);
//---------------------------------------------------------------------
// multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
//---------------------------------------------------------------------
binvrhs(lhs[jsize][1], rhs[k][jsize][i]);
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(jsize)=rhs(jsize)
// else assume U(jsize) is loaded in un pack backsub_info
// so just use it
// after u(jstart) will be sent to next cell
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess rhs use : RW
****************************************/
// Back substitution: sequential in j (reads the j+1 solution).
for(j = jsize - 1; j >= 0; j--) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(n = 0; n < 5; n++) {
// rhs(j) -= C(j) * rhs(j+1), component m.
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][2][n][m] * rhs[k][j + 1][i][n];
}
}
}
}
}
}
//---------------------------------------------------------------------
// Performs line solves in Z direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void z_solve() {
int i, j, k, m, n, ksize;
double fjac[25][5][5];
double njac[25][5][5];
double lhs[25][3][5][5];
double tmp1, tmp2, tmp3;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// This function computes the left hand side for the three z-factors
//---------------------------------------------------------------------
ksize = grid_points[2] - 1;
//---------------------------------------------------------------------
// Compute the indices for storing the block-diagonal matrix;
// determine c (labeled f) and s jacobians
//---------------------------------------------------------------------
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(j = 1; j <= grid_points[1] - 2; j++) {
/*************** Clava msgError **************
consoleOutput petit: AddSSAgraph: too many SSA graph nodes
Exit apparently due to system limitation or error (exit code -2)
Not dumping core - set PETIT_DUMP_CORE to generate core dump
****************************************/
for(i = 1; i <= grid_points[0] - 2; i++) {
#pragma omp parallel for default(shared) private(k, tmp1, tmp2, tmp3) firstprivate(ksize, j, i, c2, c1, c3c4, con43, c3, c4, c1345, u, qs, square)
for(k = 0; k <= ksize; k++) {
tmp1 = 1.0 / u[k][j][i][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[k][0][0] = 0.0;
fjac[k][1][0] = 0.0;
fjac[k][2][0] = 0.0;
fjac[k][3][0] = 1.0;
fjac[k][4][0] = 0.0;
fjac[k][0][1] = -(u[k][j][i][1] * u[k][j][i][3]) * tmp2;
fjac[k][1][1] = u[k][j][i][3] * tmp1;
fjac[k][2][1] = 0.0;
fjac[k][3][1] = u[k][j][i][1] * tmp1;
fjac[k][4][1] = 0.0;
fjac[k][0][2] = -(u[k][j][i][2] * u[k][j][i][3]) * tmp2;
fjac[k][1][2] = 0.0;
fjac[k][2][2] = u[k][j][i][3] * tmp1;
fjac[k][3][2] = u[k][j][i][2] * tmp1;
fjac[k][4][2] = 0.0;
fjac[k][0][3] = -(u[k][j][i][3] * u[k][j][i][3] * tmp2) + c2 * qs[k][j][i];
fjac[k][1][3] = -c2 * u[k][j][i][1] * tmp1;
fjac[k][2][3] = -c2 * u[k][j][i][2] * tmp1;
fjac[k][3][3] = (2.0 - c2) * u[k][j][i][3] * tmp1;
fjac[k][4][3] = c2;
fjac[k][0][4] = (c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4]) * u[k][j][i][3] * tmp2;
fjac[k][1][4] = -c2 * (u[k][j][i][1] * u[k][j][i][3]) * tmp2;
fjac[k][2][4] = -c2 * (u[k][j][i][2] * u[k][j][i][3]) * tmp2;
fjac[k][3][4] = c1 * (u[k][j][i][4] * tmp1) - c2 * (qs[k][j][i] + u[k][j][i][3] * u[k][j][i][3] * tmp2);
fjac[k][4][4] = c1 * u[k][j][i][3] * tmp1;
njac[k][0][0] = 0.0;
njac[k][1][0] = 0.0;
njac[k][2][0] = 0.0;
njac[k][3][0] = 0.0;
njac[k][4][0] = 0.0;
njac[k][0][1] = -c3c4 * tmp2 * u[k][j][i][1];
njac[k][1][1] = c3c4 * tmp1;
njac[k][2][1] = 0.0;
njac[k][3][1] = 0.0;
njac[k][4][1] = 0.0;
njac[k][0][2] = -c3c4 * tmp2 * u[k][j][i][2];
njac[k][1][2] = 0.0;
njac[k][2][2] = c3c4 * tmp1;
njac[k][3][2] = 0.0;
njac[k][4][2] = 0.0;
njac[k][0][3] = -con43 * c3c4 * tmp2 * u[k][j][i][3];
njac[k][1][3] = 0.0;
njac[k][2][3] = 0.0;
njac[k][3][3] = con43 * c3 * c4 * tmp1;
njac[k][4][3] = 0.0;
njac[k][0][4] = -(c3c4 - c1345) * tmp3 * (u[k][j][i][1] * u[k][j][i][1]) - (c3c4 - c1345) * tmp3 * (u[k][j][i][2] * u[k][j][i][2]) - (con43 * c3c4 - c1345) * tmp3 * (u[k][j][i][3] * u[k][j][i][3]) - c1345 * tmp2 * u[k][j][i][4];
njac[k][1][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][1];
njac[k][2][4] = (c3c4 - c1345) * tmp2 * u[k][j][i][2];
njac[k][3][4] = (con43 * c3c4 - c1345) * tmp2 * u[k][j][i][3];
njac[k][4][4] = (c1345) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in z direction
//---------------------------------------------------------------------
lhsinit(lhs, ksize);
#pragma omp parallel for default(shared) private(k, tmp1, tmp2) firstprivate(ksize, dt, tz1, tz2, dz1, dz2, dz3, dz4, dz5, fjac, njac)
for(k = 1; k <= ksize - 1; k++) {
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[k][0][0][0] = -tmp2 * fjac[k - 1][0][0] - tmp1 * njac[k - 1][0][0] - tmp1 * dz1;
lhs[k][0][1][0] = -tmp2 * fjac[k - 1][1][0] - tmp1 * njac[k - 1][1][0];
lhs[k][0][2][0] = -tmp2 * fjac[k - 1][2][0] - tmp1 * njac[k - 1][2][0];
lhs[k][0][3][0] = -tmp2 * fjac[k - 1][3][0] - tmp1 * njac[k - 1][3][0];
lhs[k][0][4][0] = -tmp2 * fjac[k - 1][4][0] - tmp1 * njac[k - 1][4][0];
lhs[k][0][0][1] = -tmp2 * fjac[k - 1][0][1] - tmp1 * njac[k - 1][0][1];
lhs[k][0][1][1] = -tmp2 * fjac[k - 1][1][1] - tmp1 * njac[k - 1][1][1] - tmp1 * dz2;
lhs[k][0][2][1] = -tmp2 * fjac[k - 1][2][1] - tmp1 * njac[k - 1][2][1];
lhs[k][0][3][1] = -tmp2 * fjac[k - 1][3][1] - tmp1 * njac[k - 1][3][1];
lhs[k][0][4][1] = -tmp2 * fjac[k - 1][4][1] - tmp1 * njac[k - 1][4][1];
lhs[k][0][0][2] = -tmp2 * fjac[k - 1][0][2] - tmp1 * njac[k - 1][0][2];
lhs[k][0][1][2] = -tmp2 * fjac[k - 1][1][2] - tmp1 * njac[k - 1][1][2];
lhs[k][0][2][2] = -tmp2 * fjac[k - 1][2][2] - tmp1 * njac[k - 1][2][2] - tmp1 * dz3;
lhs[k][0][3][2] = -tmp2 * fjac[k - 1][3][2] - tmp1 * njac[k - 1][3][2];
lhs[k][0][4][2] = -tmp2 * fjac[k - 1][4][2] - tmp1 * njac[k - 1][4][2];
lhs[k][0][0][3] = -tmp2 * fjac[k - 1][0][3] - tmp1 * njac[k - 1][0][3];
lhs[k][0][1][3] = -tmp2 * fjac[k - 1][1][3] - tmp1 * njac[k - 1][1][3];
lhs[k][0][2][3] = -tmp2 * fjac[k - 1][2][3] - tmp1 * njac[k - 1][2][3];
lhs[k][0][3][3] = -tmp2 * fjac[k - 1][3][3] - tmp1 * njac[k - 1][3][3] - tmp1 * dz4;
lhs[k][0][4][3] = -tmp2 * fjac[k - 1][4][3] - tmp1 * njac[k - 1][4][3];
lhs[k][0][0][4] = -tmp2 * fjac[k - 1][0][4] - tmp1 * njac[k - 1][0][4];
lhs[k][0][1][4] = -tmp2 * fjac[k - 1][1][4] - tmp1 * njac[k - 1][1][4];
lhs[k][0][2][4] = -tmp2 * fjac[k - 1][2][4] - tmp1 * njac[k - 1][2][4];
lhs[k][0][3][4] = -tmp2 * fjac[k - 1][3][4] - tmp1 * njac[k - 1][3][4];
lhs[k][0][4][4] = -tmp2 * fjac[k - 1][4][4] - tmp1 * njac[k - 1][4][4] - tmp1 * dz5;
lhs[k][1][0][0] = 1.0 + tmp1 * 2.0 * njac[k][0][0] + tmp1 * 2.0 * dz1;
lhs[k][1][1][0] = tmp1 * 2.0 * njac[k][1][0];
lhs[k][1][2][0] = tmp1 * 2.0 * njac[k][2][0];
lhs[k][1][3][0] = tmp1 * 2.0 * njac[k][3][0];
lhs[k][1][4][0] = tmp1 * 2.0 * njac[k][4][0];
lhs[k][1][0][1] = tmp1 * 2.0 * njac[k][0][1];
lhs[k][1][1][1] = 1.0 + tmp1 * 2.0 * njac[k][1][1] + tmp1 * 2.0 * dz2;
lhs[k][1][2][1] = tmp1 * 2.0 * njac[k][2][1];
lhs[k][1][3][1] = tmp1 * 2.0 * njac[k][3][1];
lhs[k][1][4][1] = tmp1 * 2.0 * njac[k][4][1];
lhs[k][1][0][2] = tmp1 * 2.0 * njac[k][0][2];
lhs[k][1][1][2] = tmp1 * 2.0 * njac[k][1][2];
lhs[k][1][2][2] = 1.0 + tmp1 * 2.0 * njac[k][2][2] + tmp1 * 2.0 * dz3;
lhs[k][1][3][2] = tmp1 * 2.0 * njac[k][3][2];
lhs[k][1][4][2] = tmp1 * 2.0 * njac[k][4][2];
lhs[k][1][0][3] = tmp1 * 2.0 * njac[k][0][3];
lhs[k][1][1][3] = tmp1 * 2.0 * njac[k][1][3];
lhs[k][1][2][3] = tmp1 * 2.0 * njac[k][2][3];
lhs[k][1][3][3] = 1.0 + tmp1 * 2.0 * njac[k][3][3] + tmp1 * 2.0 * dz4;
lhs[k][1][4][3] = tmp1 * 2.0 * njac[k][4][3];
lhs[k][1][0][4] = tmp1 * 2.0 * njac[k][0][4];
lhs[k][1][1][4] = tmp1 * 2.0 * njac[k][1][4];
lhs[k][1][2][4] = tmp1 * 2.0 * njac[k][2][4];
lhs[k][1][3][4] = tmp1 * 2.0 * njac[k][3][4];
lhs[k][1][4][4] = 1.0 + tmp1 * 2.0 * njac[k][4][4] + tmp1 * 2.0 * dz5;
lhs[k][2][0][0] = tmp2 * fjac[k + 1][0][0] - tmp1 * njac[k + 1][0][0] - tmp1 * dz1;
lhs[k][2][1][0] = tmp2 * fjac[k + 1][1][0] - tmp1 * njac[k + 1][1][0];
lhs[k][2][2][0] = tmp2 * fjac[k + 1][2][0] - tmp1 * njac[k + 1][2][0];
lhs[k][2][3][0] = tmp2 * fjac[k + 1][3][0] - tmp1 * njac[k + 1][3][0];
lhs[k][2][4][0] = tmp2 * fjac[k + 1][4][0] - tmp1 * njac[k + 1][4][0];
lhs[k][2][0][1] = tmp2 * fjac[k + 1][0][1] - tmp1 * njac[k + 1][0][1];
lhs[k][2][1][1] = tmp2 * fjac[k + 1][1][1] - tmp1 * njac[k + 1][1][1] - tmp1 * dz2;
lhs[k][2][2][1] = tmp2 * fjac[k + 1][2][1] - tmp1 * njac[k + 1][2][1];
lhs[k][2][3][1] = tmp2 * fjac[k + 1][3][1] - tmp1 * njac[k + 1][3][1];
lhs[k][2][4][1] = tmp2 * fjac[k + 1][4][1] - tmp1 * njac[k + 1][4][1];
lhs[k][2][0][2] = tmp2 * fjac[k + 1][0][2] - tmp1 * njac[k + 1][0][2];
lhs[k][2][1][2] = tmp2 * fjac[k + 1][1][2] - tmp1 * njac[k + 1][1][2];
lhs[k][2][2][2] = tmp2 * fjac[k + 1][2][2] - tmp1 * njac[k + 1][2][2] - tmp1 * dz3;
lhs[k][2][3][2] = tmp2 * fjac[k + 1][3][2] - tmp1 * njac[k + 1][3][2];
lhs[k][2][4][2] = tmp2 * fjac[k + 1][4][2] - tmp1 * njac[k + 1][4][2];
lhs[k][2][0][3] = tmp2 * fjac[k + 1][0][3] - tmp1 * njac[k + 1][0][3];
lhs[k][2][1][3] = tmp2 * fjac[k + 1][1][3] - tmp1 * njac[k + 1][1][3];
lhs[k][2][2][3] = tmp2 * fjac[k + 1][2][3] - tmp1 * njac[k + 1][2][3];
lhs[k][2][3][3] = tmp2 * fjac[k + 1][3][3] - tmp1 * njac[k + 1][3][3] - tmp1 * dz4;
lhs[k][2][4][3] = tmp2 * fjac[k + 1][4][3] - tmp1 * njac[k + 1][4][3];
lhs[k][2][0][4] = tmp2 * fjac[k + 1][0][4] - tmp1 * njac[k + 1][0][4];
lhs[k][2][1][4] = tmp2 * fjac[k + 1][1][4] - tmp1 * njac[k + 1][1][4];
lhs[k][2][2][4] = tmp2 * fjac[k + 1][2][4] - tmp1 * njac[k + 1][2][4];
lhs[k][2][3][4] = tmp2 * fjac[k + 1][3][4] - tmp1 * njac[k + 1][3][4];
lhs[k][2][4][4] = tmp2 * fjac[k + 1][4][4] - tmp1 * njac[k + 1][4][4] - tmp1 * dz5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(KMAX) and rhs'(KMAX) will be sent to next cell.
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// outer most do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[0][j][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs(lhs[0][1], lhs[0][2], rhs[0][j][i]);
//---------------------------------------------------------------------
// begin inner most do loop
// do all the elements of the cell unless last
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess lhs use : RW
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(k = 1; k <= ksize - 1; k++) {
//-------------------------------------------------------------------
// subtract A*lhs_vector(k-1) from lhs_vector(k)
//
// rhs(k) = rhs(k) - A*rhs(k-1)
//-------------------------------------------------------------------
matvec_sub(lhs[k][0], rhs[k - 1][j][i], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(k) = B(k) - C(k-1)*A(k)
// matmul_sub(AA,i,j,k,c,CC,i,j,k-1,c,BB,i,j,k)
//-------------------------------------------------------------------
matmul_sub(lhs[k][0], lhs[k - 1][2], lhs[k][1]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
// multiply rhs[0][j][i] by b_inverse[0][j][i] and copy to rhs
//-------------------------------------------------------------------
binvcrhs(lhs[k][1], lhs[k][2], rhs[k][j][i]);
}
//---------------------------------------------------------------------
// Now finish up special cases for last cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[ksize][0], rhs[ksize - 1][j][i], rhs[ksize][j][i]);
//---------------------------------------------------------------------
// B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
// matmul_sub(AA,i,j,ksize,c,
// $ CC,i,j,ksize-1,c,BB,i,j,ksize)
//---------------------------------------------------------------------
matmul_sub(lhs[ksize][0], lhs[ksize - 1][2], lhs[ksize][1]);
//---------------------------------------------------------------------
// multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
//---------------------------------------------------------------------
binvrhs(lhs[ksize][1], rhs[ksize][j][i]);
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(ksize)=rhs(ksize)
// else assume U(ksize) is loaded in un pack backsub_info
// so just use it
// after u(kstart) will be sent to next cell
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess rhs use : RW
****************************************/
for(k = ksize - 1; k >= 0; k--) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(m = 0; m < 5; m++) {
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(n = 0; n < 5; n++) {
rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][2][n][m] * rhs[k + 1][j][i][n];
}
}
}
}
}
}
/*
 * Print the standard NPB benchmark completion summary (problem size,
 * iteration count, timing, Mop/s rate and verification status) to stdout.
 *
 * name     - benchmark name; "EP" selects power-of-two size formatting
 * class    - problem class letter (e.g. 'S', 'A', 'B')
 * n1,n2,n3 - grid dimensions; n2 == n3 == 0 means a non-grid benchmark
 * niter    - number of iterations performed
 * t        - elapsed wall-clock time in seconds
 * mops     - measured performance in millions of operations per second
 * optype   - short description of the operation type counted by mops
 * verified - nonzero if the computed result passed verification
 */
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified) {
char size[16];
int j;
printf("\n\n %s Benchmark Completed.\n", name);
printf(" Class = %12c\n", class);
// If this is not a grid-based problem (EP, FT, CG), then
// we only print n1, which contains some measure of the
// problem size. In that case, n2 and n3 are both zero.
// Otherwise, we print the grid size n1xn2xn3
if((n2 == 0) && (n3 == 0)) {
if((name[0] == 'E') && (name[1] == 'P')) {
// EP reports 2^n1; format the value right-justified in a 15-column
// field. snprintf (rather than sprintf) guarantees the 16-byte
// buffer cannot overflow if the conversion ever exceeds 15 chars:
// a 15-wide field plus the terminating NUL exactly fills size[16],
// so any wider result under sprintf would be undefined behavior.
snprintf(size, sizeof size, "%15.0lf", pow(2.0, n1));
j = 14;
// Strip a trailing '.' left by the %.0lf conversion, if present.
if(size[j] == '.') {
size[j] = ' ';
j--;
}
size[j + 1] = '\0';
printf(" Size = %15s\n", size);
}
else {
printf(" Size = %12d\n", n1);
}
}
else {
printf(" Size = %4dx%4dx%4d\n", n1, n2, n3);
}
printf(" Iterations = %12d\n", niter);
printf(" Time in seconds = %12.2lf\n", t);
printf(" Mop/s total = %15.2lf\n", mops);
printf(" Operation type = %24s\n", optype);
if(verified) printf(" Verification = %12s\n", "SUCCESSFUL");
else printf(" Verification = %12s\n", "UNSUCCESSFUL");
}
void wtime(double *t) {
static int sec = -1;
struct timeval tv;
gettimeofday(&tv, (void *) 0);
if(sec < 0) sec = tv.tv_sec;
*t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/* Convenience wrapper around wtime(): return the current wall-clock
 * reading (seconds since the first wtime() call) by value. */
double elapsed_time() {
double now;
wtime(&now);
return now;
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
// Reset the accumulated time for timer slot n to zero.
// NOTE(review): elapsed[] is a file-scope accumulator array defined
// elsewhere in this file; n is assumed to be a valid index — not checked.
void timer_clear(int n) {
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
// Record the current wall clock as the start point of timer slot n.
// Must be paired with a later timer_stop(n) to accumulate the interval.
void timer_start(int n) {
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
/* Stop timer slot n: add the wall-clock interval since the matching
 * timer_start(n) to its running total in elapsed[n]. */
void timer_stop(int n) {
elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
// Return the total accumulated time (seconds) recorded for timer slot n
// across all start/stop pairs since the last timer_clear(n).
double timer_read(int n) {
return (elapsed[n]);
}
|
kmp_stats.h | #ifndef KMP_STATS_H
#define KMP_STATS_H
/** @file kmp_stats.h
* Functions for collecting statistics.
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "kmp_config.h"
#include "kmp_debug.h"
#if KMP_STATS_ENABLED
/* Statistics accumulator.
Accumulates number of samples and computes min, max, mean, standard deviation
on the fly.
Online variance calculation algorithm from
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm
*/
#error #include "kmp_stats_timing.h"
#include <limits>
#include <math.h>
#include <new> // placement new
#include <stdint.h>
#include <string>
#include <vector>
/* Enable developer statistics here if you want them. They are more detailed
than is useful for application characterisation and are intended for the
runtime library developer. */
#define KMP_DEVELOPER_STATS 0
/* Enable/Disable histogram output */
#define KMP_STATS_HIST 0
/*!
* @ingroup STATS_GATHERING
* \brief flags to describe the statistic (timer or counter)
*
*/
enum stats_flags_e {
noTotal = 1 << 0, //!< do not show a TOTAL_aggregation for this statistic
onlyInMaster = 1 << 1, //!< statistic is valid only for primary thread
noUnits = 1 << 2, //!< statistic doesn't need units printed next to it
notInMaster = 1 << 3, //!< statistic is valid only for non-primary threads
logEvent = 1 << 4 //!< statistic can be logged on the event timeline when
//! KMP_STATS_EVENTS is on (valid only for timers)
};
/*!
* @ingroup STATS_GATHERING
* \brief the states which a thread can be in
*
*/
enum stats_state_e {
IDLE,
SERIAL_REGION,
FORK_JOIN_BARRIER,
PLAIN_BARRIER,
TASKWAIT,
TASKYIELD,
TASKGROUP,
IMPLICIT_TASK,
EXPLICIT_TASK,
TEAMS_REGION
};
/*!
* \brief Add new counters under KMP_FOREACH_COUNTER() macro in kmp_stats.h
*
* @param macro a user defined macro that takes three arguments -
* macro(COUNTER_NAME, flags, arg)
* @param arg a user defined argument to send to the user defined macro
*
* \details A counter counts the occurrence of some event. Each thread
* accumulates its own count, at the end of execution the counts are aggregated
* treating each thread as a separate measurement. (Unless onlyInMaster is set,
* in which case there's only a single measurement). The min,mean,max are
* therefore the values for the threads. Adding the counter here and then
* putting a KMP_BLOCK_COUNTER(name) at the point you want to count is all you
* need to do. All of the tables and printing is generated from this macro.
* Format is "macro(name, flags, arg)"
*
* @ingroup STATS_GATHERING
*/
// clang-format off
#define KMP_FOREACH_COUNTER(macro, arg) \
macro(OMP_PARALLEL,stats_flags_e::onlyInMaster|stats_flags_e::noTotal,arg) \
macro(OMP_NESTED_PARALLEL, 0, arg) \
macro(OMP_LOOP_STATIC, 0, arg) \
macro(OMP_LOOP_STATIC_STEAL, 0, arg) \
macro(OMP_LOOP_DYNAMIC, 0, arg) \
macro(OMP_DISTRIBUTE, 0, arg) \
macro(OMP_BARRIER, 0, arg) \
macro(OMP_CRITICAL, 0, arg) \
macro(OMP_SINGLE, 0, arg) \
macro(OMP_MASTER, 0, arg) \
macro(OMP_MASKED, 0, arg) \
macro(OMP_TEAMS, 0, arg) \
macro(OMP_set_lock, 0, arg) \
macro(OMP_test_lock, 0, arg) \
macro(REDUCE_wait, 0, arg) \
macro(REDUCE_nowait, 0, arg) \
macro(OMP_TASKYIELD, 0, arg) \
macro(OMP_TASKLOOP, 0, arg) \
macro(TASK_executed, 0, arg) \
macro(TASK_cancelled, 0, arg) \
macro(TASK_stolen, 0, arg)
// clang-format on
/*!
* \brief Add new timers under KMP_FOREACH_TIMER() macro in kmp_stats.h
*
* @param macro a user defined macro that takes three arguments -
* macro(TIMER_NAME, flags, arg)
* @param arg a user defined argument to send to the user defined macro
*
* \details A timer collects multiple samples of some count in each thread and
* then finally aggregates all of the samples from all of the threads. For most
* timers the printing code also provides an aggregation over the thread totals.
* These are printed as TOTAL_foo. The count is normally a time (in ticks),
* hence the name "timer". (But can be any value, so we use this for "number of
* arguments passed to fork" as well). For timers the threads are not
* significant, it's the individual observations that count, so the statistics
* are at that level. Format is "macro(name, flags, arg)"
*
* @ingroup STATS_GATHERING2
*/
// clang-format off
#define KMP_FOREACH_TIMER(macro, arg) \
macro (OMP_worker_thread_life, stats_flags_e::logEvent, arg) \
macro (OMP_parallel, stats_flags_e::logEvent, arg) \
macro (OMP_parallel_overhead, stats_flags_e::logEvent, arg) \
macro (OMP_teams, stats_flags_e::logEvent, arg) \
macro (OMP_teams_overhead, stats_flags_e::logEvent, arg) \
macro (OMP_loop_static, 0, arg) \
macro (OMP_loop_static_scheduling, 0, arg) \
macro (OMP_loop_dynamic, 0, arg) \
macro (OMP_loop_dynamic_scheduling, 0, arg) \
macro (OMP_distribute, 0, arg) \
macro (OMP_distribute_scheduling, 0, arg) \
macro (OMP_critical, 0, arg) \
macro (OMP_critical_wait, 0, arg) \
macro (OMP_single, 0, arg) \
macro (OMP_master, 0, arg) \
macro (OMP_masked, 0, arg) \
macro (OMP_task_immediate, 0, arg) \
macro (OMP_task_taskwait, 0, arg) \
macro (OMP_task_taskyield, 0, arg) \
macro (OMP_task_taskgroup, 0, arg) \
macro (OMP_task_join_bar, 0, arg) \
macro (OMP_task_plain_bar, 0, arg) \
macro (OMP_taskloop_scheduling, 0, arg) \
macro (OMP_plain_barrier, stats_flags_e::logEvent, arg) \
macro (OMP_idle, stats_flags_e::logEvent, arg) \
macro (OMP_fork_barrier, stats_flags_e::logEvent, arg) \
macro (OMP_join_barrier, stats_flags_e::logEvent, arg) \
macro (OMP_serial, stats_flags_e::logEvent, arg) \
macro (OMP_set_numthreads, stats_flags_e::noUnits | stats_flags_e::noTotal, \
arg) \
macro (OMP_PARALLEL_args, stats_flags_e::noUnits | stats_flags_e::noTotal, \
arg) \
macro (OMP_loop_static_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (OMP_loop_static_total_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (OMP_loop_dynamic_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (OMP_loop_dynamic_total_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (OMP_distribute_iterations, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
KMP_FOREACH_DEVELOPER_TIMER(macro, arg)
// clang-format on
// OMP_worker_thread_life -- Time from thread becoming an OpenMP thread (either
// initializing OpenMP or being created by a primary
// thread) until the thread is destroyed
// OMP_parallel -- Time thread spends executing work directly
// within a #pragma omp parallel
// OMP_parallel_overhead -- Time thread spends setting up a parallel region
// OMP_loop_static -- Time thread spends executing loop iterations from
// a statically scheduled loop
// OMP_loop_static_scheduling -- Time thread spends scheduling loop iterations
// from a statically scheduled loop
// OMP_loop_dynamic -- Time thread spends executing loop iterations from
// a dynamically scheduled loop
// OMP_loop_dynamic_scheduling -- Time thread spends scheduling loop iterations
// from a dynamically scheduled loop
// OMP_critical -- Time thread spends executing critical section
// OMP_critical_wait -- Time thread spends waiting to enter
// a critical section
// OMP_single -- Time spent executing a "single" region
// OMP_master -- Time spent executing a "master" region
// OMP_masked -- Time spent executing a "masked" region
// OMP_task_immediate -- Time spent executing non-deferred tasks
// OMP_task_taskwait -- Time spent executing tasks inside a taskwait
// construct
// OMP_task_taskyield -- Time spent executing tasks inside a taskyield
// construct
// OMP_task_taskgroup -- Time spent executing tasks inside a taskgroup
// construct
// OMP_task_join_bar -- Time spent executing tasks inside a join barrier
// OMP_task_plain_bar -- Time spent executing tasks inside a barrier
// construct
// OMP_taskloop_scheduling -- Time spent scheduling tasks inside a taskloop
// construct
// OMP_plain_barrier -- Time spent in a #pragma omp barrier construct or
// inside implicit barrier at end of worksharing
// construct
// OMP_idle -- Time worker threads spend waiting for next
// parallel region
// OMP_fork_barrier -- Time spent in the fork barrier surrounding a
// parallel region
// OMP_join_barrier -- Time spent in the join barrier surrounding a
// parallel region
// OMP_serial -- Time thread zero spends executing serial code
// OMP_set_numthreads -- Values passed to omp_set_num_threads
// OMP_PARALLEL_args -- Number of arguments passed to a parallel region
// OMP_loop_static_iterations -- Number of iterations thread is assigned for
// statically scheduled loops
// OMP_loop_dynamic_iterations -- Number of iterations thread is assigned for
// dynamically scheduled loops
#if (KMP_DEVELOPER_STATS)
// Timers which are of interest to runtime library developers, not end users.
// These have to be explicitly enabled in addition to the other stats.
// KMP_fork_barrier -- time in __kmp_fork_barrier
// KMP_join_barrier -- time in __kmp_join_barrier
// KMP_barrier -- time in __kmp_barrier
// KMP_end_split_barrier -- time in __kmp_end_split_barrier
// KMP_setup_icv_copy -- time in __kmp_setup_icv_copy
// KMP_icv_copy -- start/stop timer for any ICV copying
// KMP_linear_gather -- time in __kmp_linear_barrier_gather
// KMP_linear_release -- time in __kmp_linear_barrier_release
// KMP_tree_gather -- time in __kmp_tree_barrier_gather
// KMP_tree_release -- time in __kmp_tree_barrier_release
// KMP_hyper_gather -- time in __kmp_hyper_barrier_gather
// KMP_hyper_release -- time in __kmp_hyper_barrier_release
// clang-format off
#define KMP_FOREACH_DEVELOPER_TIMER(macro, arg) \
macro(KMP_fork_call, 0, arg) \
macro(KMP_join_call, 0, arg) \
macro(KMP_end_split_barrier, 0, arg) \
macro(KMP_hier_gather, 0, arg) \
macro(KMP_hier_release, 0, arg) \
macro(KMP_hyper_gather, 0, arg) \
macro(KMP_hyper_release, 0, arg) \
macro(KMP_linear_gather, 0, arg) \
macro(KMP_linear_release, 0, arg) \
macro(KMP_tree_gather, 0, arg) \
macro(KMP_tree_release, 0, arg) \
macro(USER_resume, 0, arg) \
macro(USER_suspend, 0, arg) \
macro(USER_mwait, 0, arg) \
macro(KMP_allocate_team, 0, arg) \
macro(KMP_setup_icv_copy, 0, arg) \
macro(USER_icv_copy, 0, arg) \
macro (FOR_static_steal_stolen, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg) \
macro (FOR_static_steal_chunks, \
stats_flags_e::noUnits | stats_flags_e::noTotal, arg)
#else
#define KMP_FOREACH_DEVELOPER_TIMER(macro, arg)
#endif
// clang-format on
/*!
* \brief Add new explicit timers under KMP_FOREACH_EXPLICIT_TIMER() macro.
*
* @param macro a user defined macro that takes three arguments -
* macro(TIMER_NAME, flags, arg)
* @param arg a user defined argument to send to the user defined macro
*
* \warning YOU MUST HAVE THE SAME NAMED TIMER UNDER KMP_FOREACH_TIMER() OR ELSE
* BAD THINGS WILL HAPPEN!
*
* \details Explicit timers are ones where we need to allocate a timer itself
* (as well as the accumulated timing statistics). We allocate these on a
* per-thread basis, and explicitly start and stop them. Block timers just
* allocate the timer itself on the stack, and use the destructor to notice
* block exit; they don't need to be defined here. The name here should be the
* same as that of a timer above.
*
* @ingroup STATS_GATHERING
*/
#define KMP_FOREACH_EXPLICIT_TIMER(macro, arg) KMP_FOREACH_TIMER(macro, arg)
#define ENUMERATE(name, ignore, prefix) prefix##name,
enum timer_e { KMP_FOREACH_TIMER(ENUMERATE, TIMER_) TIMER_LAST };
enum explicit_timer_e {
KMP_FOREACH_EXPLICIT_TIMER(ENUMERATE, EXPLICIT_TIMER_) EXPLICIT_TIMER_LAST
};
enum counter_e { KMP_FOREACH_COUNTER(ENUMERATE, COUNTER_) COUNTER_LAST };
#undef ENUMERATE
/*
* A logarithmic histogram. It accumulates the number of values in each power of
* ten bin. So 1<=x<10, 10<=x<100, ...
* Mostly useful where we have some big outliers and want to see information
* about them.
*/
// Base-10 logarithmic histogram: each bin accumulates the count and sum of
// samples whose magnitude falls in one power-of-ten range; exactly-zero
// samples are tracked separately in zeroCount.
class logHistogram {
enum {
numBins = 31, /* Number of powers of 10. If this changes you need to change
* the initializer for binMax */
/*
* If you want to use this to analyse values that may be less than 1, (for
* instance times in s), then the logOffset gives you negative powers.
* In our case here, we're just looking at times in ticks, or counts, so we
* can never see values with magnitude < 1 (other than zero), so we can set
* it to 0. As above change the initializer if you change this.
*/
logOffset = 0
};
// Number of samples whose value was exactly zero (they have no log bin).
uint32_t KMP_ALIGN_CACHE zeroCount;
// Per-bin sample count and running total of the raw sample values.
struct {
uint32_t count;
double total;
} bins[numBins];
// Upper bound of each bin; defined (and initialized) in the .cpp file.
static double binMax[numBins];
#ifdef KMP_DEBUG
// Debug-only cross-check: _total mirrors the sum of all bin counts plus
// zeroCount, verified by check().
uint64_t _total;
void check() const {
uint64_t t = zeroCount;
for (int i = 0; i < numBins; i++)
t += bins[i].count;
KMP_DEBUG_ASSERT(t == _total);
}
#else
void check() const {}
#endif
public:
logHistogram() { reset(); }
// Copy constructor. NOTE(review): zeroCount is not copied here — confirm
// whether that is intentional or an upstream oversight.
logHistogram(logHistogram const &o) {
for (int i = 0; i < numBins; i++)
bins[i] = o.bins[i];
#ifdef KMP_DEBUG
_total = o._total;
#endif
}
// Clear all bins, the zero-sample count, and the debug total.
void reset() {
zeroCount = 0;
for (int i = 0; i < numBins; i++) {
bins[i].count = 0;
bins[i].total = 0;
}
#ifdef KMP_DEBUG
_total = 0;
#endif
}
// Accessors take a power-of-ten bin index; logOffset rebases it into bins[].
uint32_t count(int b) const { return bins[b + logOffset].count; }
double total(int b) const { return bins[b + logOffset].total; }
// Map a sample value to its bin index (defined out of line).
static uint32_t findBin(double sample);
// Merge another histogram into this one, bin by bin.
logHistogram &operator+=(logHistogram const &o) {
zeroCount += o.zeroCount;
for (int i = 0; i < numBins; i++) {
bins[i].count += o.bins[i].count;
bins[i].total += o.bins[i].total;
}
#ifdef KMP_DEBUG
_total += o._total;
check();
#endif
return *this;
}
void addSample(double sample);
int minBin() const;
int maxBin() const;
// Render the histogram as text; the char argument is the unit suffix.
std::string format(char) const;
};
// Online statistics accumulator: tracks min, max, mean and second moment
// (m2, for variance) over a stream of samples without storing them,
// optionally feeding each sample into a logHistogram as well.
class statistic {
double KMP_ALIGN_CACHE minVal;
double maxVal;
double meanVal;
// Sum of squared deviations from the mean (Welford's online algorithm).
double m2;
uint64_t sampleCount;
// Constant subtracted/added when interpreting samples; set via setOffset().
double offset;
// When true, addSample() also records into hist.
bool collectingHist;
logHistogram hist;
public:
statistic(bool doHist = bool(KMP_STATS_HIST)) {
reset();
collectingHist = doHist;
}
statistic(statistic const &o)
: minVal(o.minVal), maxVal(o.maxVal), meanVal(o.meanVal), m2(o.m2),
sampleCount(o.sampleCount), offset(o.offset),
collectingHist(o.collectingHist), hist(o.hist) {}
// Reconstruct a statistic from summary values; sd is the standard
// deviation, so m2 = sd^2 * count inverts getSD().
statistic(double minv, double maxv, double meanv, uint64_t sc, double sd)
: minVal(minv), maxVal(maxv), meanVal(meanv), m2(sd * sd * sc),
sampleCount(sc), offset(0.0), collectingHist(false) {}
bool haveHist() const { return collectingHist; }
double getMin() const { return minVal; }
double getMean() const { return meanVal; }
double getMax() const { return maxVal; }
uint64_t getCount() const { return sampleCount; }
// Population standard deviation. NOTE(review): divides by sampleCount,
// so the result is NaN when no samples have been added.
double getSD() const { return sqrt(m2 / sampleCount); }
double getTotal() const { return sampleCount * meanVal; }
logHistogram const *getHist() const { return &hist; }
void setOffset(double d) { offset = d; }
// Return to the empty state: min at +max, max at -max, zero moments.
void reset() {
minVal = (std::numeric_limits<double>::max)();
maxVal = -minVal;
meanVal = 0.0;
m2 = 0.0;
sampleCount = 0;
offset = 0.0;
hist.reset();
}
void addSample(double sample);
void scale(double factor);
void scaleDown(double f) { scale(1. / f); }
// Override the sample count (used when aggregating externally).
void forceCount(uint64_t count) { sampleCount = count; }
statistic &operator+=(statistic const &other);
std::string format(char unit, bool total = false) const;
std::string formatHist(char unit) const { return hist.format(unit); }
};
// Static descriptor for a timer or counter: its printable name and the
// stats_flags_e bits that control how it is aggregated and displayed.
struct statInfo {
const char *name;
uint32_t flags;
};
// A statistic specialized for timers: pairs the accumulated samples with
// the static timerInfo[] table (indexed by timer_e) that supplies each
// timer's name and display flags.
class timeStat : public statistic {
// One entry per timer_e value; defined in the .cpp file.
static statInfo timerInfo[];
public:
timeStat() : statistic() {}
static const char *name(timer_e e) { return timerInfo[e].name; }
// Flag queries below simply test the corresponding stats_flags_e bit.
static bool noTotal(timer_e e) {
return timerInfo[e].flags & stats_flags_e::noTotal;
}
static bool masterOnly(timer_e e) {
return timerInfo[e].flags & stats_flags_e::onlyInMaster;
}
static bool workerOnly(timer_e e) {
return timerInfo[e].flags & stats_flags_e::notInMaster;
}
static bool noUnits(timer_e e) {
return timerInfo[e].flags & stats_flags_e::noUnits;
}
static bool logEvent(timer_e e) {
return timerInfo[e].flags & stats_flags_e::logEvent;
}
// Strip the logEvent bit from every timer (disables event-timeline output).
static void clearEventFlags() {
for (int i = 0; i < TIMER_LAST; i++) {
timerInfo[i].flags &= (~(stats_flags_e::logEvent));
}
}
};
// Where we need explicitly to start and end the timer, this version can be used
// Since these timers normally aren't nicely scoped, so don't have a good place
// to live on the stack of the thread, they're more work to use.
// A timer that is started and stopped explicitly (rather than by scope
// exit). Accumulates elapsed ticks into the associated timeStat, with
// support for pause/resume via a running total of paused time.
class explicitTimer {
// Destination statistic for the measured interval.
timeStat *stat;
timer_e timerEnumValue;
tsc_tick_count startTime;
tsc_tick_count pauseStartTime;
// Sum of all paused intervals, subtracted from the total at stop().
tsc_tick_count::tsc_interval_t totalPauseTime;
public:
explicitTimer(timeStat *s, timer_e te)
: stat(s), timerEnumValue(te), startTime(), pauseStartTime(0),
totalPauseTime() {}
// void setStat(timeStat *s) { stat = s; }
void start(tsc_tick_count tick);
// pause() latches the tick; resume() adds the paused span to the total.
void pause(tsc_tick_count tick) { pauseStartTime = tick; }
void resume(tsc_tick_count tick) {
totalPauseTime += (tick - pauseStartTime);
}
void stop(tsc_tick_count tick, kmp_stats_list *stats_ptr = nullptr);
void reset() {
startTime = 0;
pauseStartTime = 0;
totalPauseTime = 0;
}
timer_e get_type() const { return timerEnumValue; }
};
// Where you need to partition a threads clock ticks into separate states
// e.g., a partitionedTimers class with two timers of EXECUTING_TASK, and
// DOING_NOTHING would render these conditions:
// time(EXECUTING_TASK) + time(DOING_NOTHING) = total time thread is alive
// No clock tick in the EXECUTING_TASK is a member of DOING_NOTHING and vice
// versa
// Maintains a stack of explicitTimers so that a thread's clock ticks are
// partitioned into disjoint states: pushing a timer pauses the one below
// it, popping resumes it (see class comment above). Methods are defined
// out of line in the .cpp file.
class partitionedTimers {
private:
std::vector<explicitTimer> timer_stack;
public:
partitionedTimers();
// Establish the bottom-of-stack timer.
void init(explicitTimer timer);
// Replace the top-of-stack timer with a new one.
void exchange(explicitTimer timer);
void push(explicitTimer timer);
void pop();
// Stop all timers remaining on the stack (shutdown path).
void windup();
};
// Special wrapper around the partitioned timers to aid timing code blocks
// It avoids the need to have an explicit end, leaving the scope suffices.
// RAII helper: pushes a timer onto the partitioned-timer stack on
// construction and pops it on destruction, so leaving the enclosing
// scope ends the timed region automatically.
class blockPartitionedTimer {
partitionedTimers *part_timers;
public:
blockPartitionedTimer(partitionedTimers *pt, explicitTimer timer)
: part_timers(pt) {
part_timers->push(timer);
}
~blockPartitionedTimer() { part_timers->pop(); }
};
// Special wrapper around the thread state to aid in keeping state in code
// blocks It avoids the need to have an explicit end, leaving the scope
// suffices.
// RAII helper: sets *thread_state_pointer to new_state for the lifetime
// of the enclosing scope, restoring the previous state on destruction.
class blockThreadState {
stats_state_e *state_pointer;
// State captured at construction, restored by the destructor.
stats_state_e old_state;
public:
blockThreadState(stats_state_e *thread_state_pointer, stats_state_e new_state)
: state_pointer(thread_state_pointer), old_state(*thread_state_pointer) {
*state_pointer = new_state;
}
~blockThreadState() { *state_pointer = old_state; }
};
// If all you want is a count, then you can use this...
// The individual per-thread counts will be aggregated into a statistic at
// program exit.
// Simple per-thread event counter; values are aggregated into a statistic
// at program exit (see the comment above). counterInfo[] maps counter_e
// values to names and display flags.
class counter {
uint64_t value;
// One descriptor per counter_e value; defined in the .cpp file.
static const statInfo counterInfo[];
public:
counter() : value(0) {}
void increment() { value++; }
uint64_t getValue() const { return value; }
void reset() { value = 0; }
static const char *name(counter_e e) { return counterInfo[e].name; }
static bool masterOnly(counter_e e) {
return counterInfo[e].flags & stats_flags_e::onlyInMaster;
}
};
/* ****************************************************************
Class to implement an event
There are four components to an event: start time, stop time
nest_level, and timer_name.
The start and stop time should be obvious (recorded in clock ticks).
The nest_level relates to the bar width in the timeline graph.
The timer_name is used to determine which timer event triggered this event.
the interface to this class is through four read-only operations:
1) getStart() -- returns the start time as 64 bit integer
2) getStop() -- returns the stop time as 64 bit integer
3) getNestLevel() -- returns the nest level of the event
4) getTimerName() -- returns the timer name that triggered event
*MORE ON NEST_LEVEL*
The nest level is used in the bar graph that represents the timeline.
Its main purpose is for showing how events are nested inside each other.
For example, say events, A, B, and C are recorded. If the timeline
looks like this:
Begin -------------------------------------------------------------> Time
| | | | | |
A B C C B A
start start start end end end
Then A, B, C will have a nest level of 1, 2, 3 respectively.
These values are then used to calculate the barwidth so you can
see that inside A, B has occurred, and inside B, C has occurred.
Currently, this is shown with A's bar width being larger than B's
bar width, and B's bar width being larger than C's bar width.
**************************************************************** */
class kmp_stats_event {
uint64_t start;
uint64_t stop;
int nest_level;
timer_e timer_name;
public:
kmp_stats_event()
: start(0), stop(0), nest_level(0), timer_name(TIMER_LAST) {}
kmp_stats_event(uint64_t strt, uint64_t stp, int nst, timer_e nme)
: start(strt), stop(stp), nest_level(nst), timer_name(nme) {}
inline uint64_t getStart() const { return start; }
inline uint64_t getStop() const { return stop; }
inline int getNestLevel() const { return nest_level; }
inline timer_e getTimerName() const { return timer_name; }
};
/* ****************************************************************
Class to implement a dynamically expandable array of events
---------------------------------------------------------
| event 1 | event 2 | event 3 | event 4 | ... | event N |
---------------------------------------------------------
An event is pushed onto the back of this array at every
explicitTimer->stop() call. The event records the thread #,
start time, stop time, and nest level related to the bar width.
The event vector starts at size INIT_SIZE and grows (doubles in size)
if needed. An implication of this behavior is that log(N)
reallocations are needed (where N is number of events). If you want
to avoid reallocations, then set INIT_SIZE to a large value.
the interface to this class is through six operations:
1) reset() -- sets the internal_size back to 0 but does not deallocate any
memory
2) size() -- returns the number of valid elements in the vector
3) push_back(start, stop, nest, timer_name) -- pushes an event onto
the back of the array
4) deallocate() -- frees all memory associated with the vector
5) sort() -- sorts the vector by start time
6) operator[index] or at(index) -- returns event reference at that index
**************************************************************** */
class kmp_stats_event_vector {
kmp_stats_event *events;
int internal_size;
int allocated_size;
static const int INIT_SIZE = 1024;
public:
kmp_stats_event_vector() {
events =
(kmp_stats_event *)__kmp_allocate(sizeof(kmp_stats_event) * INIT_SIZE);
internal_size = 0;
allocated_size = INIT_SIZE;
}
~kmp_stats_event_vector() {}
inline void reset() { internal_size = 0; }
inline int size() const { return internal_size; }
void push_back(uint64_t start_time, uint64_t stop_time, int nest_level,
timer_e name) {
int i;
if (internal_size == allocated_size) {
kmp_stats_event *tmp = (kmp_stats_event *)__kmp_allocate(
sizeof(kmp_stats_event) * allocated_size * 2);
for (i = 0; i < internal_size; i++)
tmp[i] = events[i];
__kmp_free(events);
events = tmp;
allocated_size *= 2;
}
events[internal_size] =
kmp_stats_event(start_time, stop_time, nest_level, name);
internal_size++;
return;
}
void deallocate();
void sort();
const kmp_stats_event &operator[](int index) const { return events[index]; }
kmp_stats_event &operator[](int index) { return events[index]; }
const kmp_stats_event &at(int index) const { return events[index]; }
kmp_stats_event &at(int index) { return events[index]; }
};
/* ****************************************************************
Class to implement a doubly-linked, circular, statistics list
|---| ---> |---| ---> |---| ---> |---| ---> ... next
| | | | | | | |
|---| <--- |---| <--- |---| <--- |---| <--- ... prev
Sentinel first second third
Node node node node
The Sentinel Node is the user handle on the list.
The first node corresponds to thread 0's statistics.
The second node corresponds to thread 1's statistics and so on...
Each node has a _timers, _counters, and _explicitTimers array to hold that
thread's statistics. The _explicitTimers point to the correct _timer and
update its statistics at every stop() call. The explicitTimers' pointers are
set up in the constructor. Each node also has an event vector to hold that
thread's timing events. The event vector expands as necessary and records
the start-stop times for each timer.
The nestLevel variable is for plotting events and is related
to the bar width in the timeline graph.
Every thread will have a thread local pointer to its node in
the list. The sentinel node is used by the primary thread to
store "dummy" statistics before __kmp_create_worker() is called.
**************************************************************** */
// Per-thread statistics node in the doubly-linked, circular list described
// above (sentinel-headed; one node per thread).  Holds the thread's timers,
// counters, partitioned timers, and its timeline event vector.  Member
// names are shared with the out-of-line definitions of push_back(),
// deallocate(), begin(), end(), size() and the iterator methods.
class kmp_stats_list {
  int gtid; // global thread id this node belongs to
  timeStat _timers[TIMER_LAST + 1];
  counter _counters[COUNTER_LAST + 1];
  explicitTimer thread_life_timer; // spans the thread's whole lifetime
  partitionedTimers _partitionedTimers;
  int _nestLevel; // one per thread
  kmp_stats_event_vector _event_vector;
  kmp_stats_list *next;
  kmp_stats_list *prev;
  stats_state_e state;
  int thread_is_idle_flag;

public:
  // A fresh node is linked to itself: a circular list of one.
  kmp_stats_list()
      : thread_life_timer(&_timers[TIMER_OMP_worker_thread_life],
                          TIMER_OMP_worker_thread_life),
        _nestLevel(0), _event_vector(), next(this), prev(this), state(IDLE),
        thread_is_idle_flag(0) {}
  ~kmp_stats_list() {}
  // Accessors for individual statistics and the underlying arrays.
  inline timeStat *getTimer(timer_e idx) { return &_timers[idx]; }
  inline counter *getCounter(counter_e idx) { return &_counters[idx]; }
  inline partitionedTimers *getPartitionedTimers() {
    return &_partitionedTimers;
  }
  inline timeStat *getTimers() { return _timers; }
  inline counter *getCounters() { return _counters; }
  inline kmp_stats_event_vector &getEventVector() { return _event_vector; }
  // Start/stop the timer covering this thread's entire life span.
  inline void startLife() { thread_life_timer.start(tsc_tick_count::now()); }
  inline void endLife() { thread_life_timer.stop(tsc_tick_count::now(), this); }
  inline void resetEventVector() { _event_vector.reset(); }
  // Nest-level bookkeeping for timeline bar widths (see event comments).
  inline void incrementNestValue() { _nestLevel++; }
  inline int getNestValue() { return _nestLevel; }
  inline void decrementNestValue() { _nestLevel--; }
  inline int getGtid() const { return gtid; }
  inline void setGtid(int newgtid) { gtid = newgtid; }
  // Current thread state, also exposed by pointer for blockThreadState.
  inline void setState(stats_state_e newstate) { state = newstate; }
  inline stats_state_e getState() const { return state; }
  inline stats_state_e *getStatePointer() { return &state; }
  inline bool isIdle() { return thread_is_idle_flag == 1; }
  inline void setIdleFlag() { thread_is_idle_flag = 1; }
  inline void resetIdleFlag() { thread_is_idle_flag = 0; }
  kmp_stats_list *push_back(int gtid); // returns newly created list node
  // Record one timed interval in this thread's event vector.
  inline void push_event(uint64_t start_time, uint64_t stop_time,
                         int nest_level, timer_e name) {
    _event_vector.push_back(start_time, stop_time, nest_level, name);
  }
  void deallocate();
  // Minimal bidirectional iterator over the list (defined out-of-line).
  class iterator;
  kmp_stats_list::iterator begin();
  kmp_stats_list::iterator end();
  int size();
  class iterator {
    kmp_stats_list *ptr;
    friend kmp_stats_list::iterator kmp_stats_list::begin();
    friend kmp_stats_list::iterator kmp_stats_list::end();

  public:
    iterator();
    ~iterator();
    iterator operator++();
    iterator operator++(int dummy);
    iterator operator--();
    iterator operator--(int dummy);
    bool operator!=(const iterator &rhs);
    bool operator==(const iterator &rhs);
    kmp_stats_list *operator*() const; // dereference operator
  };
};
/* ****************************************************************
Class to encapsulate all output functions and the environment variables
This module holds filenames for various outputs (normal stats, events, plot
file), as well as coloring information for the plot file.
The filenames and flags variables are read from environment variables.
These are read once by the constructor of the global variable
__kmp_stats_output which calls init().
During this init() call, event flags for the timeStat::timerInfo[] global
array are cleared if KMP_STATS_EVENTS is not true (on, 1, yes).
The only interface function that is public is outputStats(heading). This
function should print out everything it needs to, either to files or stderr,
depending on the environment variables described below
ENVIRONMENT VARIABLES:
KMP_STATS_FILE -- if set, all statistics (not events) will be printed to this
file, otherwise, print to stderr
KMP_STATS_THREADS -- if set to "on", then will print per thread statistics to
either KMP_STATS_FILE or stderr
KMP_STATS_PLOT_FILE -- if set, print the ploticus plot file to this filename,
otherwise, the plot file is sent to "events.plt"
KMP_STATS_EVENTS -- if set to "on", then log events, otherwise, don't log
events
KMP_STATS_EVENTS_FILE -- if set, all events are outputted to this file,
otherwise, output is sent to "events.dat"
**************************************************************** */
class kmp_stats_output_module {

public:
  // RGB color triple used for timer bars in the ploticus plot file.
  struct rgb_color {
    float r;
    float g;
    float b;
  };

private:
  std::string outputFileName;
  // The filenames and flags below are populated by init() from the
  // KMP_STATS_* environment variables described in the block comment
  // preceding this class; definitions live out-of-line.
  static const char *eventsFileName;
  static const char *plotFileName;
  static int printPerThreadFlag;
  static int printPerThreadEventsFlag;
  static const rgb_color globalColorArray[];
  static rgb_color timerColorInfo[];
  void init();
  static void setupEventColors();
  static void printPloticusFile();
  static void printHeaderInfo(FILE *statsOut);
  static void printTimerStats(FILE *statsOut, statistic const *theStats,
                              statistic const *totalStats);
  static void printCounterStats(FILE *statsOut, statistic const *theStats);
  static void printCounters(FILE *statsOut, counter const *theCounters);
  static void printEvents(FILE *eventsOut, kmp_stats_event_vector *theEvents,
                          int gtid);
  static rgb_color getEventColor(timer_e e) { return timerColorInfo[e]; }
  static void windupExplicitTimers();
  bool eventPrintingEnabled() const { return printPerThreadEventsFlag; }

public:
  // Reads the environment and sets up output targets on construction.
  kmp_stats_output_module() { init(); }
  // The only public entry point: print all statistics (to files or stderr,
  // per the environment variables) under the given heading.
  void outputStats(const char *heading);
};
#ifdef __cplusplus
extern "C" {
#endif
// Module lifecycle hooks; definitions live elsewhere (presumably called at
// library startup/shutdown — confirm against kmp_stats.cpp).
void __kmp_stats_init();
void __kmp_stats_fini();
// Zero all counters and timers for all threads (see KMP_RESET_STATS below).
void __kmp_reset_stats();
// Print all statistics under the given heading and reset them
// (see KMP_OUTPUT_STATS below).
void __kmp_output_stats(const char *);
void __kmp_accumulate_stats_at_exit(void);
// thread local pointer to stats node within list
extern KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr;
// head to stats list.
extern kmp_stats_list *__kmp_stats_list;
// lock for __kmp_stats_list
extern kmp_tas_lock_t __kmp_stats_lock;
// reference start time
extern tsc_tick_count __kmp_stats_start_time;
// interface to output
extern kmp_stats_output_module __kmp_stats_output;
#ifdef __cplusplus
}
#endif
// Simple, standard interfaces that drop out completely if stats aren't enabled
/*!
* \brief Adds value to specified timer (name).
*
* @param name timer name as specified under the KMP_FOREACH_TIMER() macro
* @param value double precision sample value to add to statistics for the timer
*
* \details Use KMP_COUNT_VALUE(name, value) macro to add a particular value to
* a timer statistics.
*
* @ingroup STATS_GATHERING
*/
#define KMP_COUNT_VALUE(name, value) \
__kmp_stats_thread_ptr->getTimer(TIMER_##name)->addSample((double)value)
/*!
* \brief Increments specified counter (name).
*
* @param name counter name as specified under the KMP_FOREACH_COUNTER() macro
*
* \details Use KMP_COUNT_BLOCK(name, value) macro to increment a statistics
* counter for the executing thread.
*
* @ingroup STATS_GATHERING
*/
#define KMP_COUNT_BLOCK(name) \
__kmp_stats_thread_ptr->getCounter(COUNTER_##name)->increment()
/*!
* \brief Outputs the current thread statistics and reset them.
*
* @param heading_string heading put above the final stats output
*
* \details Explicitly stops all timers and outputs all stats. Environment
* variable, `OMPTB_STATSFILE=filename`, can be used to output the stats to a
* filename instead of stderr. Environment variable,
* `OMPTB_STATSTHREADS=true|undefined`, can be used to output thread specific
* stats. For now the `OMPTB_STATSTHREADS` environment variable can either be
* defined with any value, which will print out thread specific stats, or it can
* be undefined (not specified in the environment) and thread specific stats
* won't be printed. It should be noted that all statistics are reset when this
* macro is called.
*
* @ingroup STATS_GATHERING
*/
#define KMP_OUTPUT_STATS(heading_string) __kmp_output_stats(heading_string)
/*!
* \brief Initializes the partitioned timers to begin with name.
*
* @param name timer which you want this thread to begin with
*
* @ingroup STATS_GATHERING
*/
// Reset the thread's partitioned-timer state to the single timer `name`.
#define KMP_INIT_PARTITIONED_TIMERS(name)                                      \
  __kmp_stats_thread_ptr->getPartitionedTimers()->init(explicitTimer(          \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))
// Scoped timer: pushes `name` on construction, popped when the
// blockPartitionedTimer object goes out of scope.
#define KMP_TIME_PARTITIONED_BLOCK(name)                                       \
  blockPartitionedTimer __PBLOCKTIME__(                                        \
      __kmp_stats_thread_ptr->getPartitionedTimers(),                          \
      explicitTimer(__kmp_stats_thread_ptr->getTimer(TIMER_##name),            \
                    TIMER_##name))
// Manual push/pop of timer `name` on the thread's partitioned-timer stack.
#define KMP_PUSH_PARTITIONED_TIMER(name)                                       \
  __kmp_stats_thread_ptr->getPartitionedTimers()->push(explicitTimer(          \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))
#define KMP_POP_PARTITIONED_TIMER()                                            \
  __kmp_stats_thread_ptr->getPartitionedTimers()->pop()
// Exchange the currently running partitioned timer for `name`
// (see partitionedTimers::exchange).
#define KMP_EXCHANGE_PARTITIONED_TIMER(name)                                   \
  __kmp_stats_thread_ptr->getPartitionedTimers()->exchange(explicitTimer(      \
      __kmp_stats_thread_ptr->getTimer(TIMER_##name), TIMER_##name))
// Get/set the executing thread's current state.
#define KMP_SET_THREAD_STATE(state_name)                                       \
  __kmp_stats_thread_ptr->setState(state_name)
#define KMP_GET_THREAD_STATE() __kmp_stats_thread_ptr->getState()
// Scoped state setter: blockThreadState restores the previous state when
// the enclosing block is left (see its destructor above).
#define KMP_SET_THREAD_STATE_BLOCK(state_name)                                 \
  blockThreadState __BTHREADSTATE__(__kmp_stats_thread_ptr->getStatePointer(), \
                                    state_name)
/*!
* \brief resets all stats (counters to 0, timers to 0 elapsed ticks)
*
* \details Reset all stats for all threads.
*
* @ingroup STATS_GATHERING
*/
#define KMP_RESET_STATS() __kmp_reset_stats()
#if (KMP_DEVELOPER_STATS)
#define KMP_COUNT_DEVELOPER_VALUE(n, v) KMP_COUNT_VALUE(n, v)
#define KMP_COUNT_DEVELOPER_BLOCK(n) KMP_COUNT_BLOCK(n)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) KMP_TIME_PARTITIONED_BLOCK(n)
#define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) KMP_PUSH_PARTITIONED_TIMER(n)
#define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) KMP_POP_PARTITIONED_TIMER(n)
#define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) \
KMP_EXCHANGE_PARTITIONED_TIMER(n)
#else
// Null definitions
#define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0)
#define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0)
#define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#endif
#else // KMP_STATS_ENABLED
// Null definitions
#define KMP_COUNT_VALUE(n, v) ((void)0)
#define KMP_COUNT_BLOCK(n) ((void)0)
#define KMP_OUTPUT_STATS(heading_string) ((void)0)
#define KMP_RESET_STATS() ((void)0)
#define KMP_COUNT_DEVELOPER_VALUE(n, v) ((void)0)
#define KMP_COUNT_DEVELOPER_BLOCK(n) ((void)0)
#define KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(n) ((void)0)
#define KMP_PUSH_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_POP_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_EXCHANGE_DEVELOPER_PARTITIONED_TIMER(n) ((void)0)
#define KMP_INIT_PARTITIONED_TIMERS(name) ((void)0)
#define KMP_TIME_PARTITIONED_BLOCK(name) ((void)0)
#define KMP_PUSH_PARTITIONED_TIMER(name) ((void)0)
#define KMP_POP_PARTITIONED_TIMER() ((void)0)
#define KMP_SET_THREAD_STATE(state_name) ((void)0)
#define KMP_GET_THREAD_STATE() ((void)0)
#define KMP_SET_THREAD_STATE_BLOCK(state_name) ((void)0)
#endif // KMP_STATS_ENABLED
#endif // KMP_STATS_H
|
update_ops_matrix_diagonal_multi.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
// Apply a diagonal matrix, given by its 2^count diagonal elements, to the
// listed target qubits of a state vector of dimension `dim` (in place).
//
//   target_qubit_index_list  : indices of the target qubits
//   target_qubit_index_count : number of target qubits
//   diagonal_element         : 2^target_qubit_index_count diagonal entries
//   state                    : state vector, updated in place
//   dim                      : dimension of the state vector
void multi_qubit_diagonal_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* diagonal_element, CTYPE* state, ITYPE dim) {
    // matrix dim, mask, buffer
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
    // sorted insertion positions for rebuilding full basis indices
    const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
    // loop variables
    const ITYPE loop_dim = dim >> target_qubit_index_count;
    ITYPE state_index;
#ifdef _OPENMP
    // BUG FIX: capture the ambient thread count *before* possibly lowering
    // it.  The previous code restored with omp_set_num_threads(
    // omp_get_max_threads()) after omp_set_num_threads(1); at that point
    // omp_get_max_threads() returns 1, so the "restore" permanently pinned
    // all subsequent parallel regions to a single thread.
    const int default_thread_count = omp_get_max_threads();
    UINT threshold = 14;
    if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1);
#pragma omp parallel for
#endif
    for (state_index = 0; state_index < loop_dim; ++state_index) {
        // rebuild the full basis index by re-inserting zero bits at the
        // (sorted) target-qubit positions
        ITYPE basis_0 = state_index;
        for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
            UINT insert_index = sorted_insert_index_list[cursor];
            basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index);
        }
        // multiply each amplitude in this block by its diagonal element
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            state[basis_0 ^ matrix_mask_list[y]] *= diagonal_element[y];
        }
    }
#ifdef _OPENMP
    omp_set_num_threads(default_thread_count);
#endif
    // the create_* helpers allocate; casts drop const for free()
    free((UINT*)sorted_insert_index_list);
    free((ITYPE*)matrix_mask_list);
}
// Apply a diagonal matrix to the target qubits, conditioned on the control
// qubits holding the given control values.  Only basis states whose control
// bits match are touched; the state vector is updated in place.
//
//   control_qubit_index_list / control_value_list / control_qubit_index_count:
//       control qubits and the bit values that enable the gate
//   target_qubit_index_list / target_qubit_index_count:
//       target qubits the diagonal acts on
//   diagonal_element : 2^target_qubit_index_count diagonal entries
//   state, dim       : state vector and its dimension
void multi_qubit_control_multi_qubit_diagonal_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count,
    const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* diagonal_element, CTYPE* state, ITYPE dim) {
    // matrix dim, mask, buffer
    const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
    ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
    // combined (target + control) insertion positions, sorted
    const UINT insert_index_count = target_qubit_index_count + control_qubit_index_count;
    UINT* sorted_insert_index_list = create_sorted_ui_list_list(target_qubit_index_list, target_qubit_index_count, control_qubit_index_list, control_qubit_index_count);
    // bitmask selecting basis states whose control bits match
    ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count);
    // loop variables
    const ITYPE loop_dim = dim >> (target_qubit_index_count + control_qubit_index_count);
    ITYPE state_index;
#ifdef _OPENMP
    // BUG FIX: capture the ambient thread count *before* possibly lowering
    // it.  The previous restore used omp_get_max_threads() *after*
    // omp_set_num_threads(1), which reads back 1 and left the whole program
    // single-threaded from then on.
    const int default_thread_count = omp_get_max_threads();
    UINT threshold = 14;
    if (dim < (((ITYPE)1) << threshold)) omp_set_num_threads(1);
#pragma omp parallel for
#endif
    for (state_index = 0; state_index < loop_dim; ++state_index) {
        // rebuild the full basis index with zeros at all inserted positions
        ITYPE basis_0 = state_index;
        for (UINT cursor = 0; cursor < insert_index_count; cursor++) {
            UINT insert_index = sorted_insert_index_list[cursor];
            basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index);
        }
        // set the control bits to their required values
        basis_0 ^= control_mask;
        // multiply each amplitude in this block by its diagonal element
        for (ITYPE y = 0; y < matrix_dim; ++y) {
            state[basis_0 ^ matrix_mask_list[y]] *= diagonal_element[y];
        }
    }
#ifdef _OPENMP
    omp_set_num_threads(default_thread_count);
#endif
    free(sorted_insert_index_list);
    free(matrix_mask_list);
}
|
Optimizer.h | /*
* Optimizer.h
*
* Created by Guido Novati on 30.10.18.
* Copyright 2018 ETH Zurich. All rights reserved.
*
*/
#pragma once
#include <fstream>
#include "Network.h"
struct MomentumSGD
{
const Real eta;
const Real normalization; // 1/batchSize
const Real beta;
const Real lambda;
MomentumSGD(const Real _eta, // Learning rate
const int batchSize,
const Real _beta1,
const Real _beta2,
const Real _lambda
) : eta(_eta), normalization(1./batchSize), beta(_beta1), lambda(_lambda) {}
// perform gradient update for a parameter array:
inline void step (
const int size, // parameter array's size
Real* const param, // parameter array
Real* const grad, // parameter array gradient
Real* const mom1st, // parameter array gradient 1st moment
Real* const mom2nd // parameter array gradient 2nd moment (unused)
) const
{
// DONE: TODO : Compute Momentum SGD update
#pragma omp parallel for schedule(static)
for (int p= 0; p < size; p++)
{
mom1st[p]= beta * mom1st[p] - eta * normalization * grad[p]; // 1st moment update
param[p] += mom1st[p]; // param (e.g. weights, biases) update
param[p] -= eta * lambda * param[p]; // L2 penalization
}
}
};
template<typename Algorithm>
struct Optimizer
{
Network& NET;
const Real eta, beta_1, beta_2, lambda;
// grab the reference to network weights and parameters
std::vector<Params*> & parms = NET.params;
std::vector<Params*> & grads = NET.grads;
// allocate space to store first (and if needed second) moment of the grad
// which will allow us to learn with momentum:
std::vector<Params*> momentum_1st = NET.allocateGrad();
std::vector<Params*> momentum_2nd = NET.allocateGrad();
// counter of gradient step:
size_t step = 0;
// Constructor:
Optimizer(Network& NN, Real LR = .001, // Learning rate. Should be in range [1e-5 to 1e-2]
Real L2penal = 0, // L2 penalization coefficient. Found by exploration.
Real B1 = .900, // Momentum coefficient. Should be in range [.5 to .9]
Real B2 = .999 // Second moment coefficient. Currently not in use.
) :
NET(NN), eta(LR), beta_1(B1), beta_2(B2), lambda(L2penal) {
}
virtual ~Optimizer() {
for (auto& p : momentum_1st)
_dispose_object(p);
for (auto& p : momentum_2nd)
_dispose_object(p);
}
virtual void update(const int batchSize)
{
assert(parms.size() == grads.size());
assert(parms.size() == momentum_1st.size());
assert(parms.size() == momentum_2nd.size());
// Given some learning algorithm..
const Algorithm algo(eta, batchSize, beta_1, beta_2, lambda);
// ... loop over all parameter arrays and compute the update:
#pragma omp parallel for schedule(static)
for (size_t j = 0; j < parms.size(); j++)
{
if (parms[j] == nullptr) continue; //layer does not have parameters
if (parms[j]->nWeights > 0)
{
algo.step(parms[j]->nWeights,
parms[j]->weights, grads[j]->weights,
momentum_1st[j]->weights, momentum_2nd[j]->weights);
grads[j]->clearWeight(); // reset for next step
}
if (parms[j]->nBiases > 0)
{
algo.step(parms[j]->nBiases,
parms[j]->biases, grads[j]->biases,
momentum_1st[j]->biases, momentum_2nd[j]->biases);
grads[j]->clearBias(); // reset for next step
}
}
step++;
}
};
|
naive.h | #ifndef NEIGHBORSEARCHNAIVE_H_
#define NEIGHBORSEARCHNAIVE_H_
#include "utils/pointcloud.h"
#include "utils/vector.h"
namespace dive {
// Cutoff distance below which two points count as neighbours.
// BUG FIX: this was a non-static *definition* in a header, which causes
// duplicate-symbol link errors as soon as the header is included from more
// than one translation unit.  `static` gives each translation unit its own
// copy (use a C++17 `inline` variable instead if a single shared global is
// required).
static double radius_of_influence = 1e-2;

// Brute-force O(N^2) neighbour count: for each point i, increment its
// neighbour counter once per other point j lying strictly within
// radius_of_influence.
template <size_t N, typename PointCloudT>
void NeighborSearchNaive(PointCloudT& pcloud) {
  // Hoisted loop invariant: squared cutoff radius.
  const double r2 = radius_of_influence * radius_of_influence;
#pragma omp parallel for
  for (omp_iterator i = 0; i < pcloud.size(); i++) {
    for (omp_iterator j = 0; j < pcloud.size(); j++) {
      if (i == j) continue;
      Vector<N> deltar = pcloud.pos[i] - pcloud.pos[j];
      double d2 = deltar * deltar; // squared distance via dot product
      // NOTE(review): `n_neihgs` looks like a typo for `n_neighs`, but the
      // field is declared in utils/pointcloud.h, so it is kept as-is here.
      if (d2 < r2) pcloud.n_neihgs[i]++;
    }
  }
}
} // namespace dive
#endif // NEIGHBORSEARCHNAIVE_H_
|
convolutiondepthwise_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static inline signed char float2int8(float v)
{
int int32 = round(v);
if (int32 > 127) return 127;
if (int32 < -127) return -127;
return (signed char)int32;
}
static void convdw3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const signed char* kernel = (const signed char *)_kernel + p*9;
int* outptr0 = out;
int* outptr0n = outptr0 + outw;
const signed char* img0 = bottom_blob.channel(p);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w*2;
const signed char* r3 = img0 + w*3;
int i = 0;
#if __ARM_NEON
int8x16_t _k0123456789x = vld1q_s8(kernel);
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v4.8b, v5.8b}, [%3] \n"
"ld1 {v6.8b, v7.8b}, [%4] \n"
"ld1 {v8.8b, v9.8b}, [%5] \n"
"ld1 {v10.8b, v11.8b}, [%6] \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"add %5, %5, #8 \n"
"add %6, %6, #8 \n"
"ext v12.8b, v4.8b, v5.8b, #1 \n"
"ext v13.8b, v4.8b, v5.8b, #2 \n"
"ext v14.8b, v6.8b, v7.8b, #1 \n"
"ext v15.8b, v6.8b, v7.8b, #2 \n"
"ext v16.8b, v8.8b, v9.8b, #1 \n"
"ext v17.8b, v8.8b, v9.8b, #2 \n"
"ext v18.8b, v10.8b, v11.8b, #1 \n"
"ext v19.8b, v10.8b, v11.8b, #2 \n"
"sshll v4.8h, v4.8b, #0 \n"// r00
"sshll v12.8h, v12.8b, #0 \n"// r01
"sshll v13.8h, v13.8b, #0 \n"// r02
"sshll v6.8h, v6.8b, #0 \n"// r10
"sshll v14.8h, v14.8b, #0 \n"// r11
"sshll v15.8h, v15.8b, #0 \n"// r12
"sshll v8.8h, v8.8b, #0 \n"// r20
"sshll v16.8h, v16.8b, #0 \n"// r21
"sshll v17.8h, v17.8b, #0 \n"// r22
"sshll v10.8h, v10.8b, #0 \n"// r30
"sshll v18.8h, v18.8b, #0 \n"// r31
"sshll v19.8h, v19.8b, #0 \n"// r32
// r0
"smull v20.4s, v4.4h, %14.h[0] \n"// (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %14.h[0] \n"
"smull v22.4s, v12.4h, %14.h[1] \n"// (r01 - r08) * k01
"smull2 v23.4s, v12.8h, %14.h[1] \n"
"smull v24.4s, v13.4h, %14.h[2] \n"// (r02 - r09) * k02
"smull2 v25.4s, v13.8h, %14.h[2] \n"
// r1
"smull v26.4s, v6.4h, %14.h[0] \n"// (r10 - r17) * k00
"smull2 v27.4s, v6.8h, %14.h[0] \n"
"smull v28.4s, v14.4h, %14.h[1] \n"// (r11 - r18) * k01
"smull2 v29.4s, v14.8h, %14.h[1] \n"
"smull v30.4s, v15.4h, %14.h[2] \n"// (r12 - r19) * k02
"smull2 v31.4s, v15.8h, %14.h[2] \n"
"smlal v20.4s, v6.4h, %14.h[3] \n"// (r10 - r17) * k03
"smlal2 v21.4s, v6.8h, %14.h[3] \n"
"smlal v22.4s, v14.4h, %15.h[0] \n"// (r11 - r18) * k04
"smlal2 v23.4s, v14.8h, %15.h[0] \n"
"smlal v24.4s, v15.4h, %15.h[1] \n"// (r12 - r19) * k05
"smlal2 v25.4s, v15.8h, %15.h[1] \n"
// r2
"smlal v26.4s, v8.4h, %14.h[3] \n"// (r20 - r27) * k03
"smlal2 v27.4s, v8.8h, %14.h[3] \n"
"smlal v28.4s, v16.4h, %15.h[0] \n"// (r21 - r28) * k04
"smlal2 v29.4s, v16.8h, %15.h[0] \n"
"smlal v30.4s, v17.4h, %15.h[1] \n"// (r22 - r29) * k05
"smlal2 v31.4s, v17.8h, %15.h[1] \n"
"smlal v20.4s, v8.4h, %15.h[2] \n"// (r20 - r27) * k06
"smlal2 v21.4s, v8.8h, %15.h[2] \n"
"smlal v22.4s, v16.4h, %15.h[3] \n"// (r21 - r28) * k07
"smlal2 v23.4s, v16.8h, %15.h[3] \n"
"smlal v24.4s, v17.4h, %16.h[0] \n"// (r22 - r29) * k08
"smlal2 v25.4s, v17.8h, %16.h[0] \n"
// r3
"smlal v26.4s, v10.4h, %15.h[2] \n"// (r30 - r37) * k06
"smlal2 v27.4s, v10.8h, %15.h[2] \n"
"smlal v28.4s, v18.4h, %15.h[3] \n"// (r31 - r38) * k07
"smlal2 v29.4s, v18.8h, %15.h[3] \n"
"smlal v30.4s, v19.4h, %16.h[0] \n"// (r32 - r39) * k08
"smlal2 v31.4s, v19.8h, %16.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v26.4s, v26.4s, v28.4s \n"
"add v27.4s, v27.4s, v29.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
"add v26.4s, v26.4s, v30.4s \n"
"add v27.4s, v27.4s, v31.4s \n"
"st1 {v20.4s, v21.4s}, [%1], #32 \n"
"st1 {v26.4s, v27.4s}, [%2], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k8xxx) // %16
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld1.s8 {d30-d31}, [%3] \n"// r0
"add %3, %3, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n"// r00
"vmovl.s8 q5, d10 \n"// r01
"vmovl.s8 q6, d12 \n"// r02
// sum0
"vmull.s16 q7, d30, %P14[0] \n"// (r00 - r07) * k00
"vmull.s16 q8, d31, %P14[0] \n"
"vmull.s16 q9, d10, %P14[1] \n"// (r01 - r08) * k01
"vmull.s16 q10, d11, %P14[1] \n"
"vmlal.s16 q7, d12, %P14[2] \n"// (r02 - r09) * k02
"vmlal.s16 q8, d13, %P14[2] \n"
// r1
"vld1.s8 {d30-d31}, [%4] \n"// r1
"add %4, %4, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n"// r10
"vmovl.s8 q5, d10 \n"// r11
"vmovl.s8 q6, d12 \n"// r12
// sum0
"vmlal.s16 q7, d30, %P14[3] \n"// (r10 - r17) * k03
"vmlal.s16 q8, d31, %P14[3] \n"
"vmlal.s16 q9, d10, %P15[0] \n"// (r11 - r18) * k04
"vmlal.s16 q10, d11, %P15[0] \n"
"vmlal.s16 q7, d12, %P15[1] \n"// (r12 - r19) * k05
"vmlal.s16 q8, d13, %P15[1] \n"
// sum1
"vmull.s16 q11, d30, %P14[0] \n"// (r10 - r17) * k00
"vmull.s16 q12, d31, %P14[0] \n"
"vmull.s16 q13, d10, %P14[1] \n"// (r11 - r18) * k01
"vmull.s16 q14, d11, %P14[1] \n"
"vmlal.s16 q11, d12, %P14[2] \n"// (r12 - r19) * k02
"vmlal.s16 q12, d13, %P14[2] \n"
// r2
"vld1.s8 {d30-d31}, [%5] \n"// r2
"add %5, %5, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n"// r20
"vmovl.s8 q5, d10 \n"// r21
"vmovl.s8 q6, d12 \n"// r22
// sum0
"vmlal.s16 q7, d30, %P15[2] \n"// (r20 - r27) * k06
"vmlal.s16 q8, d31, %P15[2] \n"
"vmlal.s16 q9, d10, %P15[3] \n"// (r21 - r28) * k07
"vmlal.s16 q10, d11, %P15[3] \n"
"vmlal.s16 q7, d12, %P16[0] \n"// (r22 - r29) * k08
"vmlal.s16 q8, d13, %P16[0] \n"
// sum1
"vmlal.s16 q11, d30, %P14[3] \n"// (r20 - r27) * k03
"vmlal.s16 q12, d31, %P14[3] \n"
"vmlal.s16 q13, d10, %P15[0] \n"// (r21 - r28) * k04
"vmlal.s16 q14, d11, %P15[0] \n"
"vmlal.s16 q11, d12, %P15[1] \n"// (r22 - r29) * k05
"vmlal.s16 q12, d13, %P15[1] \n"
// r3
"vld1.s8 {d30-d31}, [%6] \n"// r3
"add %6, %6, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n"// r30
"vmovl.s8 q5, d10 \n"// r31
"vmovl.s8 q6, d12 \n"// r32
// sum1
"vmlal.s16 q11, d30, %P15[2] \n"// (r30 - r37) * k06
"vmlal.s16 q12, d31, %P15[2] \n"
"vmlal.s16 q13, d10, %P15[3] \n"// (r31 - r38) * k07
"vmlal.s16 q14, d11, %P15[3] \n"
"vmlal.s16 q11, d12, %P16[0] \n"// (r32 - r39) * k08
"vmlal.s16 q12, d13, %P16[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vadd.s32 q11, q11, q13 \n"
"vadd.s32 q12, q12, q14 \n"
"vst1.s32 {d14-d17}, [%1]! \n"
"vst1.s32 {d22-d25}, [%2]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k8xxx) // %16
: "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
// TODO NEON
int sum0 = 0;
int sum0n = 0;
sum0 += (int)r0[0] * kernel[0];
sum0 += (int)r0[1] * kernel[1];
sum0 += (int)r0[2] * kernel[2];
sum0 += (int)r1[0] * kernel[3];
sum0 += (int)r1[1] * kernel[4];
sum0 += (int)r1[2] * kernel[5];
sum0 += (int)r2[0] * kernel[6];
sum0 += (int)r2[1] * kernel[7];
sum0 += (int)r2[2] * kernel[8];
sum0n += (int)r1[0] * kernel[0];
sum0n += (int)r1[1] * kernel[1];
sum0n += (int)r1[2] * kernel[2];
sum0n += (int)r2[0] * kernel[3];
sum0n += (int)r2[1] * kernel[4];
sum0n += (int)r2[2] * kernel[5];
sum0n += (int)r3[0] * kernel[6];
sum0n += (int)r3[1] * kernel[7];
sum0n += (int)r3[2] * kernel[8];
*outptr0 = sum0;
*outptr0n = sum0n;
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr0n++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr0n += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v4.8b, v5.8b}, [%2] \n"
"ld1 {v6.8b, v7.8b}, [%3] \n"
"ld1 {v8.8b, v9.8b}, [%4] \n"
"add %2, %2, #8 \n"
"add %3, %3, #8 \n"
"add %4, %4, #8 \n"
"ext v12.8b, v4.8b, v5.8b, #1 \n"
"ext v13.8b, v4.8b, v5.8b, #2 \n"
"ext v14.8b, v6.8b, v7.8b, #1 \n"
"ext v15.8b, v6.8b, v7.8b, #2 \n"
"ext v16.8b, v8.8b, v9.8b, #1 \n"
"ext v17.8b, v8.8b, v9.8b, #2 \n"
"sshll v4.8h, v4.8b, #0 \n"// r00
"sshll v12.8h, v12.8b, #0 \n"// r01
"sshll v13.8h, v13.8b, #0 \n"// r02
"sshll v6.8h, v6.8b, #0 \n"// r10
"sshll v14.8h, v14.8b, #0 \n"// r11
"sshll v15.8h, v15.8b, #0 \n"// r12
"sshll v8.8h, v8.8b, #0 \n"// r20
"sshll v16.8h, v16.8b, #0 \n"// r21
"sshll v17.8h, v17.8b, #0 \n"// r22
// r0
"smull v20.4s, v4.4h, %10.h[0] \n"// (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %10.h[0] \n"
"smull v22.4s, v12.4h, %10.h[1] \n"// (r01 - r08) * k01
"smull2 v23.4s, v12.8h, %10.h[1] \n"
"smull v24.4s, v13.4h, %10.h[2] \n"// (r02 - r09) * k02
"smull2 v25.4s, v13.8h, %10.h[2] \n"
// r1
"smlal v20.4s, v6.4h, %10.h[3] \n"// (r10 - r17) * k03
"smlal2 v21.4s, v6.8h, %10.h[3] \n"
"smlal v22.4s, v14.4h, %11.h[0] \n"// (r11 - r18) * k04
"smlal2 v23.4s, v14.8h, %11.h[0] \n"
"smlal v24.4s, v15.4h, %11.h[1] \n"// (r12 - r19) * k05
"smlal2 v25.4s, v15.8h, %11.h[1] \n"
// r2
"smlal v20.4s, v8.4h, %11.h[2] \n"// (r20 - r27) * k06
"smlal2 v21.4s, v8.8h, %11.h[2] \n"
"smlal v22.4s, v16.4h, %11.h[3] \n"// (r21 - r28) * k07
"smlal2 v23.4s, v16.8h, %11.h[3] \n"
"smlal v24.4s, v17.4h, %12.h[0] \n"// (r22 - r29) * k08
"smlal2 v25.4s, v17.8h, %12.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
"st1 {v20.4s, v21.4s}, [%1], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx) // %12
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"
);
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld1.s8 {d30-d31}, [%2] \n"// r0
"add %2, %2, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n"// r00
"vmovl.s8 q5, d10 \n"// r01
"vmovl.s8 q6, d12 \n"// r02
// sum0
"vmull.s16 q7, d30, %P10[0] \n"// (r00 - r07) * k00
"vmull.s16 q8, d31, %P10[0] \n"
"vmull.s16 q9, d10, %P10[1] \n"// (r01 - r08) * k01
"vmull.s16 q10, d11, %P10[1] \n"
"vmlal.s16 q7, d12, %P10[2] \n"// (r02 - r09) * k02
"vmlal.s16 q8, d13, %P10[2] \n"
// r1
"vld1.s8 {d30-d31}, [%3] \n"// r1
"add %3, %3, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n"// r10
"vmovl.s8 q5, d10 \n"// r11
"vmovl.s8 q6, d12 \n"// r12
// sum0
"vmlal.s16 q7, d30, %P10[3] \n"// (r10 - r17) * k03
"vmlal.s16 q8, d31, %P10[3] \n"
"vmlal.s16 q9, d10, %P11[0] \n"// (r11 - r18) * k04
"vmlal.s16 q10, d11, %P11[0] \n"
"vmlal.s16 q7, d12, %P11[1] \n"// (r12 - r19) * k05
"vmlal.s16 q8, d13, %P11[1] \n"
// r2
"vld1.s8 {d30-d31}, [%4] \n"// r2
"add %4, %4, #8 \n"
"vext.s8 d10, d30, d31, #1 \n"
"vext.s8 d12, d30, d31, #2 \n"
"vmovl.s8 q15, d30 \n"// r20
"vmovl.s8 q5, d10 \n"// r21
"vmovl.s8 q6, d12 \n"// r22
// sum0
"vmlal.s16 q7, d30, %P11[2] \n"// (r20 - r27) * k06
"vmlal.s16 q8, d31, %P11[2] \n"
"vmlal.s16 q9, d10, %P11[3] \n"// (r21 - r28) * k07
"vmlal.s16 q10, d11, %P11[3] \n"
"vmlal.s16 q7, d12, %P12[0] \n"// (r22 - r29) * k08
"vmlal.s16 q8, d13, %P12[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vst1.s32 {d14-d17}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx) // %12
: "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr0 = sum;
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
// Depthwise 3x3 convolution, stride 2, int8 input -> int32 output, NEON-accelerated.
// For each channel p, convolves bottom_blob.channel(p) with the p-th 3x3 kernel
// (9 signed-char weights) and writes raw int32 accumulator sums to top_blob.channel(p).
// No bias or requantization is applied here (see the *_requant_neon variants).
// Assumes the caller sized top_blob so that 2*outw + 1 <= w and 2*outh + 1 <= h.
static void convdw3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After producing one output row, the input pointers sit 2*outw columns into the
// current row; tailstep advances past the leftover columns of that row plus one
// whole extra row (vertical stride 2 skips a row between output rows).
const int tailstep = w - 2*outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
// 9 weights per channel, stored contiguously.
const signed char* kernel = (const signed char*)_kernel + p*9;
int* outptr = out;
const signed char* img = bottom_blob.channel(p);
// Three consecutive input rows feeding one output row.
const signed char* r0 = img;
const signed char* r1 = img + w;
const signed char* r2 = img + w*2;
int i = 0;
#if __ARM_NEON
// NOTE(review): vld1q_s8 loads 16 bytes but only 9 kernel weights exist for this
// channel; the trailing 7 bytes come from the next channel's weights (or past the
// end for the last channel) and are discarded below — confirm _kernel is padded
// or that this over-read is acceptable.
int8x16_t _k0123456789x = vld1q_s8(kernel);
// Widen int8 weights to int16 lanes: _k0123 = k0..k3, _k4567 = k4..k7, _k8xxx = k8 (+junk).
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i < outh; i++)
{
#if __ARM_NEON
// NEON path handles 8 output pixels per asm-loop iteration; scalar tail does the rest.
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
// ld2 de-interleaves even/odd input columns (stride-2 sampling); ext builds the
// "+2 column" taps; sshll widens s8->s16; smull/smlal accumulate into int32 lanes.
asm volatile(
"0:                             \n"
"ld2    {v4.8b, v5.8b}, [%2], #16 \n"
"ld2    {v6.8b, v7.8b}, [%2]    \n"
"ld2    {v8.8b, v9.8b}, [%3], #16 \n"
"ld2    {v10.8b, v11.8b}, [%3]  \n"
"ld2    {v12.8b, v13.8b}, [%4], #16 \n"
"ld2    {v14.8b, v15.8b}, [%4]  \n"
"ext    v6.8b, v4.8b, v6.8b, #1 \n"
"ext    v10.8b, v8.8b, v10.8b, #1 \n"
"ext    v14.8b, v12.8b, v14.8b, #1 \n"
"sshll  v4.8h, v4.8b, #0        \n"// r00
"sshll  v5.8h, v5.8b, #0        \n"// r01
"sshll  v6.8h, v6.8b, #0        \n"// r02
"sshll  v8.8h, v8.8b, #0        \n"// r10
"sshll  v9.8h, v9.8b, #0        \n"// r11
"sshll  v10.8h, v10.8b, #0      \n"// r12
"sshll  v12.8h, v12.8b, #0      \n"// r20
"sshll  v13.8h, v13.8b, #0      \n"// r21
"sshll  v14.8h, v14.8b, #0      \n"// r22
// r0
"smull  v20.4s, v4.4h, %10.h[0] \n"// (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %10.h[0] \n"
"smull  v22.4s, v5.4h, %10.h[1] \n"// (r01 - r08) * k01
"smull2 v23.4s, v5.8h, %10.h[1] \n"
"smull  v24.4s, v6.4h, %10.h[2] \n"// (r02 - r09) * k02
"smull2 v25.4s, v6.8h, %10.h[2] \n"
// r1
"smlal  v20.4s, v8.4h, %10.h[3] \n"// (r10 - r17) * k03
"smlal2 v21.4s, v8.8h, %10.h[3] \n"
"smlal  v22.4s, v9.4h, %11.h[0] \n"// (r11 - r18) * k04
"smlal2 v23.4s, v9.8h, %11.h[0] \n"
"smlal  v24.4s, v10.4h, %11.h[1] \n"// (r12 - r19) * k05
"smlal2 v25.4s, v10.8h, %11.h[1] \n"
// r2
"smlal  v20.4s, v12.4h, %11.h[2] \n"// (r20 - r27) * k06
"smlal2 v21.4s, v12.8h, %11.h[2] \n"
"smlal  v22.4s, v13.4h, %11.h[3] \n"// (r21 - r28) * k07
"smlal2 v23.4s, v13.8h, %11.h[3] \n"
"smlal  v24.4s, v14.4h, %12.h[0] \n"// (r22 - r29) * k08
"smlal2 v25.4s, v14.8h, %12.h[0] \n"
// add and save
"add    v20.4s, v20.4s, v22.4s  \n"
"add    v21.4s, v21.4s, v23.4s  \n"
"add    v20.4s, v20.4s, v24.4s  \n"
"add    v21.4s, v21.4s, v25.4s  \n"
"st1    {v20.4s, v21.4s}, [%1], #32 \n"
"subs   %w0, %w0, #1            \n"
"bne    0b                      \n"
: "=r"(nn),      // %0
"=r"(outptr),  // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2)       // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123),     // %10
"w"(_k4567),     // %11
"w"(_k8xxx)      // %12
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"
);
}
#else
if (nn > 0)
{
// armv7 variant of the same stride-2 kernel: vld2 de-interleaves even/odd
// columns, vmovl widens, vmull/vmlal accumulate into q7..q10.
asm volatile(
"0:                             \n"
// r0
"vld2.s8    {d30-d31}, [%2]!    \n"// r0
"vld2.s8    {d10-d11}, [%2]     \n"
"vext.s8    d12, d30, d10, #1   \n"
"vmovl.s8   q5, d31             \n"// r01
"vmovl.s8   q15, d30            \n"// r00
"vmovl.s8   q6, d12             \n"// r02
// sum0
"vmull.s16  q7, d30, %P10[0]    \n"// (r00 - r07) * k00
"vmull.s16  q8, d31, %P10[0]    \n"
"vmull.s16  q9, d10, %P10[1]    \n"// (r01 - r08) * k01
"vmull.s16  q10, d11, %P10[1]   \n"
"vmlal.s16  q7, d12, %P10[2]    \n"// (r02 - r09) * k02
"vmlal.s16  q8, d13, %P10[2]    \n"
// r1
"vld2.s8    {d30-d31}, [%3]!    \n"// r1
"vld2.s8    {d10-d11}, [%3]     \n"
"vext.s8    d12, d30, d10, #1   \n"
"vmovl.s8   q5, d31             \n"// r11
"vmovl.s8   q15, d30            \n"// r10
"vmovl.s8   q6, d12             \n"// r12
// sum0
"vmlal.s16  q7, d30, %P10[3]    \n"// (r10 - r17) * k03
"vmlal.s16  q8, d31, %P10[3]    \n"
"vmlal.s16  q9, d10, %P11[0]    \n"// (r11 - r18) * k04
"vmlal.s16  q10, d11, %P11[0]   \n"
"vmlal.s16  q7, d12, %P11[1]    \n"// (r12 - r19) * k05
"vmlal.s16  q8, d13, %P11[1]    \n"
// r2
"vld2.s8    {d30-d31}, [%4]!    \n"// r2
"vld2.s8    {d10-d11}, [%4]     \n"
"vext.s8    d12, d30, d10, #1   \n"
"vmovl.s8   q5, d31             \n"// r21
"vmovl.s8   q15, d30            \n"// r20
"vmovl.s8   q6, d12             \n"// r22
// sum0
"vmlal.s16  q7, d30, %P11[2]    \n"// (r20 - r27) * k06
"vmlal.s16  q8, d31, %P11[2]    \n"
"vmlal.s16  q9, d10, %P11[3]    \n"// (r21 - r28) * k07
"vmlal.s16  q10, d11, %P11[3]   \n"
"vmlal.s16  q7, d12, %P12[0]    \n"// (r22 - r29) * k08
"vmlal.s16  q8, d13, %P12[0]    \n"
"subs       %0, %0, #1          \n"
// add and save
"vadd.s32   q7, q7, q9          \n"
"vadd.s32   q8, q8, q10         \n"
"vst1.s32   {d14-d17}, [%1]!    \n"
"bne        0b                  \n"
: "=r"(nn),      // %0
"=r"(outptr),  // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2)       // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123),     // %10
"w"(_k4567),     // %11
"w"(_k8xxx)      // %12
: "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar reference tail: plain 3x3 dot product per output pixel, stride 2.
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
// Hop to the start of the next pair of input rows (see tailstep above).
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
// Depthwise 3x3 convolution, stride 1, int8 input -> int8 output with fused
// requantization. Per channel p the int32 accumulator is transformed as:
//   out_s8 = saturate_s8(round((sum * scale_requant_in + bias) * scale_requant_out))
// scales_requant layout: scales_requant[2*p] is the dequant (input) scale,
// scales_requant[2*p+1] the requant (output) scale for channel p.
// The main loop produces TWO output rows per pass (rows i and i+1 share input
// rows r1/r2); a single-row tail loop handles an odd final row.
static void convdw3x3s1_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
const float scale_requant_in = scales_requant[2*p];
const float scale_requant_out = scales_requant[2*p+1];
// 9 weights per channel.
const signed char* kernel = (const signed char *)_kernel + p*9;
// outptr0 writes output row i, outptr0n row i+1.
signed char* outptr0 = out;
signed char* outptr0n = outptr0 + outw;
const signed char* img0 = bottom_blob.channel(p);
// Four consecutive input rows feed the two output rows.
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w*2;
const signed char* r3 = img0 + w*3;
int i = 0;
#if __ARM_NEON
// NOTE(review): vld1q_s8 reads 16 bytes from a 9-weight kernel; the extra
// 7 bytes are discarded — confirm _kernel is padded or the over-read is benign.
int8x16_t _k0123456789x = vld1q_s8(kernel);
// Widen weights to int16 lanes: _k0123 = k0..k3, _k4567 = k4..k7, _k8xxx = k8 (+junk).
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
// Main loop: two output rows per pass.
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
// 8 output pixels per asm iteration; scalar tail covers the remainder.
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
// aarch64 kernel: widen r0..r3 to s16 (ext forms the +1/+2 column taps),
// accumulate row i into v20-v25 and row i+1 into v26-v31, then requantize:
// scvtf -> *scale_in -> +bias -> *scale_out -> fcvtas (round-to-nearest) ->
// sqxtn to s16 -> sqxtn to s8, and store 8 bytes to each output row.
asm volatile(
"0:                             \n"
"ld1    {v4.8b, v5.8b}, [%3]    \n"
"ld1    {v6.8b, v7.8b}, [%4]    \n"
"ld1    {v8.8b, v9.8b}, [%5]    \n"
"ld1    {v10.8b, v11.8b}, [%6]  \n"
"add    %3, %3, #8              \n"
"add    %4, %4, #8              \n"
"add    %5, %5, #8              \n"
"add    %6, %6, #8              \n"
"ext    v12.8b, v4.8b, v5.8b, #1 \n"
"ext    v13.8b, v4.8b, v5.8b, #2 \n"
"ext    v14.8b, v6.8b, v7.8b, #1 \n"
"ext    v15.8b, v6.8b, v7.8b, #2 \n"
"ext    v16.8b, v8.8b, v9.8b, #1 \n"
"ext    v17.8b, v8.8b, v9.8b, #2 \n"
"ext    v18.8b, v10.8b, v11.8b, #1 \n"
"ext    v19.8b, v10.8b, v11.8b, #2 \n"
"sshll  v4.8h, v4.8b, #0        \n"// r00
"sshll  v12.8h, v12.8b, #0      \n"// r01
"sshll  v13.8h, v13.8b, #0      \n"// r02
"sshll  v6.8h, v6.8b, #0        \n"// r10
"sshll  v14.8h, v14.8b, #0      \n"// r11
"sshll  v15.8h, v15.8b, #0      \n"// r12
"sshll  v8.8h, v8.8b, #0        \n"// r20
"sshll  v16.8h, v16.8b, #0      \n"// r21
"sshll  v17.8h, v17.8b, #0      \n"// r22
"sshll  v10.8h, v10.8b, #0      \n"// r30
"sshll  v18.8h, v18.8b, #0      \n"// r31
"sshll  v19.8h, v19.8b, #0      \n"// r32
// r0
"smull  v20.4s, v4.4h, %14.h[0] \n"// (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %14.h[0] \n"
"smull  v22.4s, v12.4h, %14.h[1] \n"// (r01 - r08) * k01
"smull2 v23.4s, v12.8h, %14.h[1] \n"
"smull  v24.4s, v13.4h, %14.h[2] \n"// (r02 - r09) * k02
"smull2 v25.4s, v13.8h, %14.h[2] \n"
// r1
"smull  v26.4s, v6.4h, %14.h[0] \n"// (r10 - r17) * k00
"smull2 v27.4s, v6.8h, %14.h[0] \n"
"smull  v28.4s, v14.4h, %14.h[1] \n"// (r11 - r18) * k01
"smull2 v29.4s, v14.8h, %14.h[1] \n"
"smull  v30.4s, v15.4h, %14.h[2] \n"// (r12 - r19) * k02
"smull2 v31.4s, v15.8h, %14.h[2] \n"
"smlal  v20.4s, v6.4h, %14.h[3] \n"// (r10 - r17) * k03
"smlal2 v21.4s, v6.8h, %14.h[3] \n"
"smlal  v22.4s, v14.4h, %15.h[0] \n"// (r11 - r18) * k04
"smlal2 v23.4s, v14.8h, %15.h[0] \n"
"smlal  v24.4s, v15.4h, %15.h[1] \n"// (r12 - r19) * k05
"smlal2 v25.4s, v15.8h, %15.h[1] \n"
// r2
"smlal  v26.4s, v8.4h, %14.h[3] \n"// (r20 - r27) * k03
"smlal2 v27.4s, v8.8h, %14.h[3] \n"
"smlal  v28.4s, v16.4h, %15.h[0] \n"// (r21 - r28) * k04
"smlal2 v29.4s, v16.8h, %15.h[0] \n"
"smlal  v30.4s, v17.4h, %15.h[1] \n"// (r22 - r29) * k05
"smlal2 v31.4s, v17.8h, %15.h[1] \n"
"smlal  v20.4s, v8.4h, %15.h[2] \n"// (r20 - r27) * k06
"smlal2 v21.4s, v8.8h, %15.h[2] \n"
"smlal  v22.4s, v16.4h, %15.h[3] \n"// (r21 - r28) * k07
"smlal2 v23.4s, v16.8h, %15.h[3] \n"
"smlal  v24.4s, v17.4h, %16.h[0] \n"// (r22 - r29) * k08
"smlal2 v25.4s, v17.8h, %16.h[0] \n"
// r3
"smlal  v26.4s, v10.4h, %15.h[2] \n"// (r30 - r37) * k06
"smlal2 v27.4s, v10.8h, %15.h[2] \n"
"smlal  v28.4s, v18.4h, %15.h[3] \n"// (r31 - r38) * k07
"smlal2 v29.4s, v18.8h, %15.h[3] \n"
"smlal  v30.4s, v19.4h, %16.h[0] \n"// (r32 - r39) * k08
"smlal2 v31.4s, v19.8h, %16.h[0] \n"
// add and save
"add    v20.4s, v20.4s, v22.4s  \n"
"add    v21.4s, v21.4s, v23.4s  \n"
"add    v26.4s, v26.4s, v28.4s  \n"
"add    v27.4s, v27.4s, v29.4s  \n"
"add    v20.4s, v20.4s, v24.4s  \n"
"add    v21.4s, v21.4s, v25.4s  \n"
"add    v26.4s, v26.4s, v30.4s  \n"
"add    v27.4s, v27.4s, v31.4s  \n"
"dup    v4.4s, %w17             \n" // bias
"dup    v5.4s, %w18             \n" // scale_in
"dup    v6.4s, %w19             \n" // scale_out
// top_s32 -> top_f32
"scvtf  v20.4s, v20.4s          \n"
"scvtf  v21.4s, v21.4s          \n"
"scvtf  v26.4s, v26.4s          \n"
"scvtf  v27.4s, v27.4s          \n"
// top_f32 = top_f32 * scale_in
"fmul   v20.4s, v20.4s, v5.4s   \n"
"fmul   v21.4s, v21.4s, v5.4s   \n"
"fmul   v26.4s, v26.4s, v5.4s   \n"
"fmul   v27.4s, v27.4s, v5.4s   \n"
// top_f32 = top_f32 + bias
"fadd   v20.4s, v20.4s, v4.4s   \n"
"fadd   v21.4s, v21.4s, v4.4s   \n"
"fadd   v26.4s, v26.4s, v4.4s   \n"
"fadd   v27.4s, v27.4s, v4.4s   \n"
// top_f32 = top_f32 * scale_out
"fmul   v20.4s, v20.4s, v6.4s   \n"
"fmul   v21.4s, v21.4s, v6.4s   \n"
"fmul   v26.4s, v26.4s, v6.4s   \n"
"fmul   v27.4s, v27.4s, v6.4s   \n"
// top_f32 -> top_s32
"fcvtas v20.4s, v20.4s          \n"
"fcvtas v21.4s, v21.4s          \n"
"fcvtas v26.4s, v26.4s          \n"
"fcvtas v27.4s, v27.4s          \n"
// top_s32 -> top_s16
"sqxtn  v7.4h, v20.4s           \n"
"sqxtn  v9.4h, v26.4s           \n"
"sqxtn2 v7.8h, v21.4s           \n"
"sqxtn2 v9.8h, v27.4s           \n"
// top_s16 -> top_s8
"sqxtn  v8.8b, v7.8h            \n"
"sqxtn  v10.8b, v9.8h           \n"
// save top_s8
"st1    {v8.8b}, [%1], #8       \n"
"st1    {v10.8b}, [%2], #8      \n"
"subs   %w0, %w0, #1            \n"
"bne    0b                      \n"
: "=r"(nn),      // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0),      // %3
"=r"(r1),      // %4
"=r"(r2),      // %5
"=r"(r3)       // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123),     // %14
"w"(_k4567),     // %15
"w"(_k8xxx),     // %16
"r"(bias0),      // %17
"r"(scale_requant_in), // %18
"r"(scale_requant_out) // %19
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
#else
if (nn > 0)
{
// armv7 kernel: sum0 (row i) in q7-q10, sum1 (row i+1) in q11-q14; rows r1/r2
// feed both sums. Requantize via vcvt/vmul/vadd then vcvtr (current FPSCR
// rounding mode — NOTE(review): may differ from aarch64's fcvtas
// round-to-nearest-away; confirm acceptable), vqmovn narrows to s8.
asm volatile(
"0:                             \n"
// r0
"vld1.s8    {d30-d31}, [%3]     \n"// r0
"add        %3, %3, #8          \n"
"vext.s8    d10, d30, d31, #1   \n"
"vext.s8    d12, d30, d31, #2   \n"
"vmovl.s8   q15, d30            \n"// r00
"vmovl.s8   q5, d10             \n"// r01
"vmovl.s8   q6, d12             \n"// r02
// sum0
"vmull.s16  q7, d30, %P14[0]    \n"// (r00 - r07) * k00
"vmull.s16  q8, d31, %P14[0]    \n"
"vmull.s16  q9, d10, %P14[1]    \n"// (r01 - r08) * k01
"vmull.s16  q10, d11, %P14[1]   \n"
"vmlal.s16  q7, d12, %P14[2]    \n"// (r02 - r09) * k02
"vmlal.s16  q8, d13, %P14[2]    \n"
// r1
"vld1.s8    {d30-d31}, [%4]     \n"// r1
"add        %4, %4, #8          \n"
"vext.s8    d10, d30, d31, #1   \n"
"vext.s8    d12, d30, d31, #2   \n"
"vmovl.s8   q15, d30            \n"// r10
"vmovl.s8   q5, d10             \n"// r11
"vmovl.s8   q6, d12             \n"// r12
// sum0
"vmlal.s16  q7, d30, %P14[3]    \n"// (r10 - r17) * k03
"vmlal.s16  q8, d31, %P14[3]    \n"
"vmlal.s16  q9, d10, %P15[0]    \n"// (r11 - r18) * k04
"vmlal.s16  q10, d11, %P15[0]   \n"
"vmlal.s16  q7, d12, %P15[1]    \n"// (r12 - r19) * k05
"vmlal.s16  q8, d13, %P15[1]    \n"
// sum1
"vmull.s16  q11, d30, %P14[0]   \n"// (r10 - r17) * k00
"vmull.s16  q12, d31, %P14[0]   \n"
"vmull.s16  q13, d10, %P14[1]   \n"// (r11 - r18) * k01
"vmull.s16  q14, d11, %P14[1]   \n"
"vmlal.s16  q11, d12, %P14[2]   \n"// (r12 - r19) * k02
"vmlal.s16  q12, d13, %P14[2]   \n"
// r2
"vld1.s8    {d30-d31}, [%5]     \n"// r2
"add        %5, %5, #8          \n"
"vext.s8    d10, d30, d31, #1   \n"
"vext.s8    d12, d30, d31, #2   \n"
"vmovl.s8   q15, d30            \n"// r20
"vmovl.s8   q5, d10             \n"// r21
"vmovl.s8   q6, d12             \n"// r22
// sum0
"vmlal.s16  q7, d30, %P15[2]    \n"// (r20 - r27) * k06
"vmlal.s16  q8, d31, %P15[2]    \n"
"vmlal.s16  q9, d10, %P15[3]    \n"// (r21 - r28) * k07
"vmlal.s16  q10, d11, %P15[3]   \n"
"vmlal.s16  q7, d12, %P16[0]    \n"// (r22 - r29) * k08
"vmlal.s16  q8, d13, %P16[0]    \n"
// sum1
"vmlal.s16  q11, d30, %P14[3]   \n"// (r20 - r27) * k03
"vmlal.s16  q12, d31, %P14[3]   \n"
"vmlal.s16  q13, d10, %P15[0]   \n"// (r21 - r28) * k04
"vmlal.s16  q14, d11, %P15[0]   \n"
"vmlal.s16  q11, d12, %P15[1]   \n"// (r22 - r29) * k05
"vmlal.s16  q12, d13, %P15[1]   \n"
// r3
"vld1.s8    {d30-d31}, [%6]     \n"// r3
"add        %6, %6, #8          \n"
"vext.s8    d10, d30, d31, #1   \n"
"vext.s8    d12, d30, d31, #2   \n"
"vmovl.s8   q15, d30            \n"// r30
"vmovl.s8   q5, d10             \n"// r31
"vmovl.s8   q6, d12             \n"// r32
// sum1
"vmlal.s16  q11, d30, %P15[2]   \n"// (r30 - r37) * k06
"vmlal.s16  q12, d31, %P15[2]   \n"
"vmlal.s16  q13, d10, %P15[3]   \n"// (r31 - r38) * k07
"vmlal.s16  q14, d11, %P15[3]   \n"
"vmlal.s16  q11, d12, %P16[0]   \n"// (r32 - r39) * k08
"vmlal.s16  q12, d13, %P16[0]   \n"
"subs       %0, %0, #1          \n"
// add and save
"vadd.s32   q7, q7, q9          \n"
"vadd.s32   q8, q8, q10         \n"
"vadd.s32   q11, q11, q13       \n"
"vadd.s32   q12, q12, q14       \n"
"vdup.f32   q13, %17            \n" // bias
"vdup.f32   q14, %18            \n" // scale_in
"vdup.f32   q15, %19            \n" // scale_out
// top_s32 -> top_f32
"vcvt.f32.s32 q7, q7            \n"
"vcvt.f32.s32 q8, q8            \n"
// top_f32 = top_f32 * scale_int
"vmul.f32   q0, q7, q14         \n"
"vmul.f32   q4, q8, q14         \n"
// top_f32 = top_f32 + bias
"vadd.f32   q0, q0, q13         \n"
"vadd.f32   q4, q4, q13         \n"
// top_f32 = top_f32 * scale_out
"vmul.f32   q0, q0, q15         \n"
"vmul.f32   q4, q4, q15         \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0           \n"
"vcvtr.s32.f32 s1, s1           \n"
"vcvtr.s32.f32 s2, s2           \n"
"vcvtr.s32.f32 s3, s3           \n"
"vcvtr.s32.f32 s16, s16         \n"
"vcvtr.s32.f32 s17, s17         \n"
"vcvtr.s32.f32 s18, s18         \n"
"vcvtr.s32.f32 s19, s19         \n"
// top_s32 -> top_s16
"vqmovn.s32 d14, q0             \n"
"vqmovn.s32 d15, q4             \n"
// top_s16 -> top_s8
"vqmovn.s16 d14, q7             \n"
// save top_s8
"vst1.8     {d14}, [%1]!        \n"
// top_s32 -> top_f32
"vcvt.f32.s32 q11, q11          \n"
"vcvt.f32.s32 q12, q12          \n"
// top_f32 = top_f32 * scale_int
"vmul.f32   q0, q11, q14        \n"
"vmul.f32   q4, q12, q14        \n"
// top_f32 = top_f32 + bias
"vadd.f32   q0, q0, q13         \n"
"vadd.f32   q4, q4, q13         \n"
// top_f32 = top_f32 * scale_out
"vmul.f32   q0, q0, q15         \n"
"vmul.f32   q4, q4, q15         \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0           \n"
"vcvtr.s32.f32 s1, s1           \n"
"vcvtr.s32.f32 s2, s2           \n"
"vcvtr.s32.f32 s3, s3           \n"
"vcvtr.s32.f32 s16, s16         \n"
"vcvtr.s32.f32 s17, s17         \n"
"vcvtr.s32.f32 s18, s18         \n"
"vcvtr.s32.f32 s19, s19         \n"
// top_s32 -> top_s16
"vqmovn.s32 d14, q0             \n"
"vqmovn.s32 d15, q4             \n"
// top_s16 -> top_s8
"vqmovn.s16 d14, q7             \n"
// save top_s8
"vst1.8     {d14}, [%2]!        \n"
"bne        0b                  \n"
: "=r"(nn),      // %0
"=r"(outptr0), // %1
"=r"(outptr0n), // %2
"=r"(r0),      // %3
"=r"(r1),      // %4
"=r"(r2),      // %5
"=r"(r3)       // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr0n),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123),     // %14
"w"(_k4567),     // %15
"w"(_k8xxx),     // %16
"r"(bias0),      // %17
"r"(scale_requant_in), // %18
"r"(scale_requant_out) // %19
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail: compute both row sums per column, then requantize to int8.
for (; remain>0; remain--)
{
// TODO NEON
int sum0 = 0;
int sum0n = 0;
sum0 += (int)r0[0] * kernel[0];
sum0 += (int)r0[1] * kernel[1];
sum0 += (int)r0[2] * kernel[2];
sum0 += (int)r1[0] * kernel[3];
sum0 += (int)r1[1] * kernel[4];
sum0 += (int)r1[2] * kernel[5];
sum0 += (int)r2[0] * kernel[6];
sum0 += (int)r2[1] * kernel[7];
sum0 += (int)r2[2] * kernel[8];
sum0n += (int)r1[0] * kernel[0];
sum0n += (int)r1[1] * kernel[1];
sum0n += (int)r1[2] * kernel[2];
sum0n += (int)r2[0] * kernel[3];
sum0n += (int)r2[1] * kernel[4];
sum0n += (int)r2[2] * kernel[5];
sum0n += (int)r3[0] * kernel[6];
sum0n += (int)r3[1] * kernel[7];
sum0n += (int)r3[2] * kernel[8];
*outptr0 = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
*outptr0n = float2int8(((float)sum0n * scale_requant_in + bias0) * scale_requant_out);
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr0n++;
}
// Skip the 2-column tail of the current row, then one full row: each pass
// consumed two output rows, so inputs advance by two rows total.
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
// Each output pointer skips the row its sibling just wrote.
outptr0 += outw;
outptr0n += outw;
}
// Tail loop: one remaining output row (odd outh); r3/outptr0n unused here.
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
// Single-row aarch64 kernel; bias/scales pre-broadcast into v26/v27/v28
// outside the loop. Same accumulate + requantize pipeline as above.
asm volatile(
"dup    v26.4s, %w13            \n"
"dup    v27.4s, %w14            \n"
"dup    v28.4s, %w15            \n"
"0:                             \n"
"ld1    {v4.8b, v5.8b}, [%2]    \n"
"ld1    {v6.8b, v7.8b}, [%3]    \n"
"ld1    {v8.8b, v9.8b}, [%4]    \n"
"add    %2, %2, #8              \n"
"add    %3, %3, #8              \n"
"add    %4, %4, #8              \n"
"ext    v12.8b, v4.8b, v5.8b, #1 \n"
"ext    v13.8b, v4.8b, v5.8b, #2 \n"
"ext    v14.8b, v6.8b, v7.8b, #1 \n"
"ext    v15.8b, v6.8b, v7.8b, #2 \n"
"ext    v16.8b, v8.8b, v9.8b, #1 \n"
"ext    v17.8b, v8.8b, v9.8b, #2 \n"
"sshll  v4.8h, v4.8b, #0        \n"// r00
"sshll  v12.8h, v12.8b, #0      \n"// r01
"sshll  v13.8h, v13.8b, #0      \n"// r02
"sshll  v6.8h, v6.8b, #0        \n"// r10
"sshll  v14.8h, v14.8b, #0      \n"// r11
"sshll  v15.8h, v15.8b, #0      \n"// r12
"sshll  v8.8h, v8.8b, #0        \n"// r20
"sshll  v16.8h, v16.8b, #0      \n"// r21
"sshll  v17.8h, v17.8b, #0      \n"// r22
// r0
"smull  v20.4s, v4.4h, %10.h[0] \n"// (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %10.h[0] \n"
"smull  v22.4s, v12.4h, %10.h[1] \n"// (r01 - r08) * k01
"smull2 v23.4s, v12.8h, %10.h[1] \n"
"smull  v24.4s, v13.4h, %10.h[2] \n"// (r02 - r09) * k02
"smull2 v25.4s, v13.8h, %10.h[2] \n"
// r1
"smlal  v20.4s, v6.4h, %10.h[3] \n"// (r10 - r17) * k03
"smlal2 v21.4s, v6.8h, %10.h[3] \n"
"smlal  v22.4s, v14.4h, %11.h[0] \n"// (r11 - r18) * k04
"smlal2 v23.4s, v14.8h, %11.h[0] \n"
"smlal  v24.4s, v15.4h, %11.h[1] \n"// (r12 - r19) * k05
"smlal2 v25.4s, v15.8h, %11.h[1] \n"
// r2
"smlal  v20.4s, v8.4h, %11.h[2] \n"// (r20 - r27) * k06
"smlal2 v21.4s, v8.8h, %11.h[2] \n"
"smlal  v22.4s, v16.4h, %11.h[3] \n"// (r21 - r28) * k07
"smlal2 v23.4s, v16.8h, %11.h[3] \n"
"smlal  v24.4s, v17.4h, %12.h[0] \n"// (r22 - r29) * k08
"smlal2 v25.4s, v17.8h, %12.h[0] \n"
// add and save
"add    v20.4s, v20.4s, v22.4s  \n"
"add    v21.4s, v21.4s, v23.4s  \n"
"add    v20.4s, v20.4s, v24.4s  \n"
"add    v21.4s, v21.4s, v25.4s  \n"
// top_s32 -> top_f32
"scvtf  v20.4s, v20.4s          \n"
"scvtf  v21.4s, v21.4s          \n"
// top_f32 = top_f32 * scale_in
"fmul   v20.4s, v20.4s, v27.4s  \n"
"fmul   v21.4s, v21.4s, v27.4s  \n"
// top_f32 = top_f32 + bias
"fadd   v20.4s, v20.4s, v26.4s  \n"
"fadd   v21.4s, v21.4s, v26.4s  \n"
// top_f32 = top_f32 * scale_out
"fmul   v20.4s, v20.4s, v28.4s  \n"
"fmul   v21.4s, v21.4s, v28.4s  \n"
// top_f32 -> top_s32
"fcvtas v20.4s, v20.4s          \n"
"fcvtas v21.4s, v21.4s          \n"
// top_s32 -> top_s16
"sqxtn  v7.4h, v20.4s           \n"
"sqxtn2 v7.8h, v21.4s           \n"
// top_s16 -> top_s8
"sqxtn  v8.8b, v7.8h            \n"
// save top_s8
"st1    {v8.8b}, [%1], #8       \n"
"subs   %w0, %w0, #1            \n"
"bne    0b                      \n"
: "=r"(nn),      // %0
"=r"(outptr0), // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2)       // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123),     // %10
"w"(_k4567),     // %11
"w"(_k8xxx),     // %12
"r"(bias0),      // %13
"r"(scale_requant_in), // %14
"r"(scale_requant_out) // %15
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
#else
if (nn > 0)
{
// Single-row armv7 kernel; accumulators in q7-q10, then the same
// vcvt/vmul/vadd/vcvtr/vqmovn requantize sequence as the two-row path.
asm volatile(
"0:                             \n"
// r0
"vld1.s8    {d30-d31}, [%2]     \n"// r0
"add        %2, %2, #8          \n"
"vext.s8    d10, d30, d31, #1   \n"
"vext.s8    d12, d30, d31, #2   \n"
"vmovl.s8   q15, d30            \n"// r00
"vmovl.s8   q5, d10             \n"// r01
"vmovl.s8   q6, d12             \n"// r02
// sum0
"vmull.s16  q7, d30, %P10[0]    \n"// (r00 - r07) * k00
"vmull.s16  q8, d31, %P10[0]    \n"
"vmull.s16  q9, d10, %P10[1]    \n"// (r01 - r08) * k01
"vmull.s16  q10, d11, %P10[1]   \n"
"vmlal.s16  q7, d12, %P10[2]    \n"// (r02 - r09) * k02
"vmlal.s16  q8, d13, %P10[2]    \n"
// r1
"vld1.s8    {d30-d31}, [%3]     \n"// r1
"add        %3, %3, #8          \n"
"vext.s8    d10, d30, d31, #1   \n"
"vext.s8    d12, d30, d31, #2   \n"
"vmovl.s8   q15, d30            \n"// r10
"vmovl.s8   q5, d10             \n"// r11
"vmovl.s8   q6, d12             \n"// r12
// sum0
"vmlal.s16  q7, d30, %P10[3]    \n"// (r10 - r17) * k03
"vmlal.s16  q8, d31, %P10[3]    \n"
"vmlal.s16  q9, d10, %P11[0]    \n"// (r11 - r18) * k04
"vmlal.s16  q10, d11, %P11[0]   \n"
"vmlal.s16  q7, d12, %P11[1]    \n"// (r12 - r19) * k05
"vmlal.s16  q8, d13, %P11[1]    \n"
// r2
"vld1.s8    {d30-d31}, [%4]     \n"// r2
"add        %4, %4, #8          \n"
"vext.s8    d10, d30, d31, #1   \n"
"vext.s8    d12, d30, d31, #2   \n"
"vmovl.s8   q15, d30            \n"// r20
"vmovl.s8   q5, d10             \n"// r21
"vmovl.s8   q6, d12             \n"// r22
// sum0
"vmlal.s16  q7, d30, %P11[2]    \n"// (r20 - r27) * k06
"vmlal.s16  q8, d31, %P11[2]    \n"
"vmlal.s16  q9, d10, %P11[3]    \n"// (r21 - r28) * k07
"vmlal.s16  q10, d11, %P11[3]   \n"
"vmlal.s16  q7, d12, %P12[0]    \n"// (r22 - r29) * k08
"vmlal.s16  q8, d13, %P12[0]    \n"
"subs       %0, %0, #1          \n"
// add and save
"vadd.s32   q7, q7, q9          \n"
"vadd.s32   q8, q8, q10         \n"
"vdup.f32   q13, %13            \n" // bias
"vdup.f32   q14, %14            \n" // scale_in
"vdup.f32   q15, %15            \n" // scale_out
// top_s32 -> top_f32
"vcvt.f32.s32 q7, q7            \n"
"vcvt.f32.s32 q8, q8            \n"
// top_f32 = top_f32 * scale_int
"vmul.f32   q0, q7, q14         \n"
"vmul.f32   q4, q8, q14         \n"
// top_f32 = top_f32 + bias
"vadd.f32   q0, q0, q13         \n"
"vadd.f32   q4, q4, q13         \n"
// top_f32 = top_f32 * scale_out
"vmul.f32   q0, q0, q15         \n"
"vmul.f32   q4, q4, q15         \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0           \n"
"vcvtr.s32.f32 s1, s1           \n"
"vcvtr.s32.f32 s2, s2           \n"
"vcvtr.s32.f32 s3, s3           \n"
"vcvtr.s32.f32 s16, s16         \n"
"vcvtr.s32.f32 s17, s17         \n"
"vcvtr.s32.f32 s18, s18         \n"
"vcvtr.s32.f32 s19, s19         \n"
// top_s32 -> top_s16
"vqmovn.s32 d14, q0             \n"
"vqmovn.s32 d15, q4             \n"
// top_s16 -> top_s8
"vqmovn.s16 d14, q7             \n"
// save top_s8
"vst1.8     {d14}, [%1]!        \n"
"bne        0b                  \n"
: "=r"(nn),      // %0
"=r"(outptr0), // %1
"=r"(r0),      // %2
"=r"(r1),      // %3
"=r"(r2)       // %4
: "0"(nn),
"1"(outptr0),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123),     // %10
"w"(_k4567),     // %11
"w"(_k8xxx),     // %12
"r"(bias0),      // %13
"r"(scale_requant_in), // %14
"r"(scale_requant_out) // %15
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail for the single remaining row.
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr0 = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out);
r0++;
r1++;
r2++;
outptr0++;
}
// Skip the 2-column tail to reach the next input row (stride 1: one row per pass).
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
static void convdw3x3s2_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
const int tailstep = w - 2*outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
const float scale_requant_in = scales_requant[2*p];
const float scale_requant_out = scales_requant[2*p+1];
const signed char* kernel = (const signed char*)_kernel + p*9;
signed char* outptr = out;
const signed char* img = bottom_blob.channel(p);
const signed char* r0 = img;
const signed char* r1 = img + w;
const signed char* r2 = img + w*2;
int i = 0;
#if __ARM_NEON
int8x16_t _k0123456789x = vld1q_s8(kernel);
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"dup v26.4s, %w13 \n"
"dup v27.4s, %w14 \n"
"dup v28.4s, %w15 \n"
"0: \n"
"ld2 {v4.8b, v5.8b}, [%2], #16 \n"
"ld2 {v6.8b, v7.8b}, [%2] \n"
"ld2 {v8.8b, v9.8b}, [%3], #16 \n"
"ld2 {v10.8b, v11.8b}, [%3] \n"
"ld2 {v12.8b, v13.8b}, [%4], #16 \n"
"ld2 {v14.8b, v15.8b}, [%4] \n"
"ext v6.8b, v4.8b, v6.8b, #1 \n"
"ext v10.8b, v8.8b, v10.8b, #1 \n"
"ext v14.8b, v12.8b, v14.8b, #1 \n"
"sshll v4.8h, v4.8b, #0 \n"// r00
"sshll v5.8h, v5.8b, #0 \n"// r01
"sshll v6.8h, v6.8b, #0 \n"// r02
"sshll v8.8h, v8.8b, #0 \n"// r10
"sshll v9.8h, v9.8b, #0 \n"// r11
"sshll v10.8h, v10.8b, #0 \n"// r12
"sshll v12.8h, v12.8b, #0 \n"// r20
"sshll v13.8h, v13.8b, #0 \n"// r21
"sshll v14.8h, v14.8b, #0 \n"// r22
// r0
"smull v20.4s, v4.4h, %10.h[0] \n"// (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %10.h[0] \n"
"smull v22.4s, v5.4h, %10.h[1] \n"// (r01 - r08) * k01
"smull2 v23.4s, v5.8h, %10.h[1] \n"
"smull v24.4s, v6.4h, %10.h[2] \n"// (r02 - r09) * k02
"smull2 v25.4s, v6.8h, %10.h[2] \n"
// r1
"smlal v20.4s, v8.4h, %10.h[3] \n"// (r10 - r17) * k03
"smlal2 v21.4s, v8.8h, %10.h[3] \n"
"smlal v22.4s, v9.4h, %11.h[0] \n"// (r11 - r18) * k04
"smlal2 v23.4s, v9.8h, %11.h[0] \n"
"smlal v24.4s, v10.4h, %11.h[1] \n"// (r12 - r19) * k05
"smlal2 v25.4s, v10.8h, %11.h[1] \n"
// r2
"smlal v20.4s, v12.4h, %11.h[2] \n"// (r20 - r27) * k06
"smlal2 v21.4s, v12.8h, %11.h[2] \n"
"smlal v22.4s, v13.4h, %11.h[3] \n"// (r21 - r28) * k07
"smlal2 v23.4s, v13.8h, %11.h[3] \n"
"smlal v24.4s, v14.4h, %12.h[0] \n"// (r22 - r29) * k08
"smlal2 v25.4s, v14.8h, %12.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
// top_s32 -> top_f32
"scvtf v20.4s, v20.4s \n"
"scvtf v21.4s, v21.4s \n"
// top_f32 = top_f32 * scale_in
"fmul v20.4s, v20.4s, v27.4s \n"
"fmul v21.4s, v21.4s, v27.4s \n"
// top_f32 = top_f32 + bias
"fadd v20.4s, v20.4s, v26.4s \n"
"fadd v21.4s, v21.4s, v26.4s \n"
// top_f32 = top_f32 * scale_out
"fmul v20.4s, v20.4s, v28.4s \n"
"fmul v21.4s, v21.4s, v28.4s \n"
// top_f32 -> top_s32
"fcvtas v20.4s, v20.4s \n"
"fcvtas v21.4s, v21.4s \n"
// top_s32 -> top_s16
"sqxtn v7.4h, v20.4s \n"
"sqxtn2 v7.8h, v21.4s \n"
// top_s16 -> top_s8
"sqxtn v8.8b, v7.8h \n"
// save top_s8
"st1 {v8.8b}, [%1], #8 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx), // %12
"r"(bias0), // %13
"r"(scale_requant_in), // %14
"r"(scale_requant_out) // %15
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld2.s8 {d30-d31}, [%2]! \n"// r0
"vld2.s8 {d10-d11}, [%2] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n"// r01
"vmovl.s8 q15, d30 \n"// r00
"vmovl.s8 q6, d12 \n"// r02
// sum0
"vmull.s16 q7, d30, %P10[0] \n"// (r00 - r07) * k00
"vmull.s16 q8, d31, %P10[0] \n"
"vmull.s16 q9, d10, %P10[1] \n"// (r01 - r08) * k01
"vmull.s16 q10, d11, %P10[1] \n"
"vmlal.s16 q7, d12, %P10[2] \n"// (r02 - r09) * k02
"vmlal.s16 q8, d13, %P10[2] \n"
// r1
"vld2.s8 {d30-d31}, [%3]! \n"// r1
"vld2.s8 {d10-d11}, [%3] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n"// r11
"vmovl.s8 q15, d30 \n"// r10
"vmovl.s8 q6, d12 \n"// r12
// sum0
"vmlal.s16 q7, d30, %P10[3] \n"// (r10 - r17) * k03
"vmlal.s16 q8, d31, %P10[3] \n"
"vmlal.s16 q9, d10, %P11[0] \n"// (r11 - r18) * k04
"vmlal.s16 q10, d11, %P11[0] \n"
"vmlal.s16 q7, d12, %P11[1] \n"// (r12 - r19) * k05
"vmlal.s16 q8, d13, %P11[1] \n"
// r2
"vld2.s8 {d30-d31}, [%4]! \n"// r2
"vld2.s8 {d10-d11}, [%4] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n"// r21
"vmovl.s8 q15, d30 \n"// r20
"vmovl.s8 q6, d12 \n"// r22
// sum0
"vmlal.s16 q7, d30, %P11[2] \n"// (r20 - r27) * k06
"vmlal.s16 q8, d31, %P11[2] \n"
"vmlal.s16 q9, d10, %P11[3] \n"// (r21 - r28) * k07
"vmlal.s16 q10, d11, %P11[3] \n"
"vmlal.s16 q7, d12, %P12[0] \n"// (r22 - r29) * k08
"vmlal.s16 q8, d13, %P12[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vdup.f32 q11, %13 \n" // bias
"vdup.f32 q12, %14 \n" // scale_in
"vdup.f32 q13, %15 \n" // scale_out
// top_s32 -> top_f32
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q0, q7, q12 \n"
"vmul.f32 q4, q8, q12 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q0, q0, q11 \n"
"vadd.f32 q4, q4, q11 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q0, q13 \n"
"vmul.f32 q4, q4, q13 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s16, s16 \n"
"vcvtr.s32.f32 s17, s17 \n"
"vcvtr.s32.f32 s18, s18 \n"
"vcvtr.s32.f32 s19, s19 \n"
// top_s32 -> top_s16
"vqmovn.s32 d14, q0 \n"
"vqmovn.s32 d15, q4 \n"
// top_s16 -> top_s8
"vqmovn.s16 d14, q7 \n"
// save top_s8
"vst1.8 {d14}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx), // %12
"r"(bias0), // %13
"r"(scale_requant_in), // %14
"r"(scale_requant_out) // %15
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out);
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
DRB108-atomic-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
/*
* Test if atomic can be recognized properly. No data races.
* */
#include <omp.h>
/* Sum 1 a hundred times across threads with an OpenMP reduction;
 * each thread accumulates privately and the partials are combined,
 * so there is no data race. Prints a=100. */
int main()
{
    int a = 0;
#pragma omp parallel for reduction(+ : a)
    for (int i = 0; i < 100; ++i) {
        a += 1;
    }
    printf("a=%d\n", a);
    return 0;
}
|
lsh_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
/***********************************************************************
* Author: Vincent Rabaud
*************************************************************************/
#ifndef FLANN_LSH_INDEX_H_
#define FLANN_LSH_INDEX_H_
#include <algorithm>
#include <cassert>
#include <cstring>
#include <map>
#include <vector>
#include "flann/general.h"
#include "flann/algorithms/nn_index.h"
#include "flann/util/matrix.h"
#include "flann/util/result_set.h"
#include "flann/util/heap.h"
#include "flann/util/lsh_table.h"
#include "flann/util/allocator.h"
#include "flann/util/random.h"
#include "flann/util/saving.h"
namespace flann
{
/** Parameter bundle for building an LSH index. */
struct LshIndexParams : public IndexParams
{
    LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2)
    {
        // Tag the parameter set with the algorithm it configures.
        (*this)["algorithm"] = FLANN_INDEX_LSH;
        (*this)["table_number"] = table_number;           // how many hash tables to build
        (*this)["key_size"] = key_size;                   // bit length of the hash key
        (*this)["multi_probe_level"] = multi_probe_level; // probe levels (0 = standard LSH)
    }
};
/**
* Locality-sensitive hashing index
*
* Contains the tables and other information for indexing a set of points
* for nearest-neighbor matching.
*/
template<typename Distance>
class LshIndex : public NNIndex<Distance>
{
public:
    typedef typename Distance::ElementType ElementType;
    typedef typename Distance::ResultType DistanceType;
    typedef NNIndex<Distance> BaseClass;

    /** Constructor
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const IndexParams& params = LshIndexParams(), Distance d = Distance()) :
        BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_, "table_number", 12);
        key_size_ = get_param<unsigned int>(index_params_, "key_size", 20);
        multi_probe_level_ = get_param<unsigned int>(index_params_, "multi_probe_level", 2);
        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
    }

    /** Constructor
     * @param input_data dataset with the input features
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(), Distance d = Distance()) :
        BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_, "table_number", 12);
        key_size_ = get_param<unsigned int>(index_params_, "key_size", 20);
        multi_probe_level_ = get_param<unsigned int>(index_params_, "multi_probe_level", 2);
        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
        setDataset(input_data);
    }

    /** Copy constructor: copies tables and configuration. */
    LshIndex(const LshIndex& other) : BaseClass(other),
        tables_(other.tables_),
        table_number_(other.table_number_),
        key_size_(other.key_size_),
        multi_probe_level_(other.multi_probe_level_),
        xor_masks_(other.xor_masks_)
    {
    }

    /** Copy-and-swap assignment. */
    LshIndex& operator=(LshIndex other)
    {
        this->swap(other);
        return *this;
    }

    virtual ~LshIndex()
    {
        freeIndex();
    }

    /** Polymorphic copy. */
    BaseClass* clone() const
    {
        return new LshIndex(*this);
    }

    using BaseClass::buildIndex;

    /** Add points to the index.
     * Rebuilds from scratch when the dataset grew past
     * size_at_build_ * rebuild_threshold; otherwise appends the new
     * points to every existing hash table.
     */
    void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
    {
        assert(points.cols == veclen_);
        size_t old_size = size_;
        extendDataset(points);
        if (rebuild_threshold > 1 && size_at_build_ * rebuild_threshold < size_) {
            buildIndex();
        }
        else {
            // Fix: the inner loop previously shadowed the outer loop
            // variable `i`; use distinct names for clarity.
            for (unsigned int t = 0; t < table_number_; ++t) {
                lsh::LshTable<ElementType>& table = tables_[t];
                for (size_t idx = old_size; idx < size_; ++idx) {
                    table.add(idx, points_[idx]);
                }
            }
        }
    }

    flann_algorithm_t getType() const
    {
        return FLANN_INDEX_LSH;
    }

    /** (De)serialize the index; on load, mirror the values back into
     * index_params_ so they stay consistent with the loaded state. */
    template<typename Archive>
    void serialize(Archive& ar)
    {
        ar.setObject(this);
        ar & *static_cast<NNIndex<Distance>*>(this);
        ar & table_number_;
        ar & key_size_;
        ar & multi_probe_level_;
        ar & xor_masks_;
        ar & tables_;
        if (Archive::is_loading::value) {
            index_params_["algorithm"] = getType();
            index_params_["table_number"] = table_number_;
            index_params_["key_size"] = key_size_;
            index_params_["multi_probe_level"] = multi_probe_level_;
        }
    }

    void saveIndex(FILE* stream)
    {
        serialization::SaveArchive sa(stream);
        sa & *this;
    }

    void loadIndex(FILE* stream)
    {
        serialization::LoadArchive la(stream);
        la & *this;
    }

    /**
     * Computes the index memory usage
     * Returns: memory used by the index
     */
    int usedMemory() const
    {
        return size_ * sizeof(int);
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  Matrix<size_t>& indices,
                  Matrix<DistanceType>& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        assert(indices.rows >= queries.rows);
        assert(dists.rows >= queries.rows);
        assert(indices.cols >= knn);
        assert(dists.cols >= knn);

        int count = 0;
        if (params.use_heap == FLANN_True) {
#pragma omp parallel num_threads(params.cores)
            {
                // One result set per thread; reused across queries.
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        return count;
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector< std::vector<size_t> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        if (indices.size() < queries.rows) indices.resize(queries.rows);
        if (dists.size() < queries.rows) dists.resize(queries.rows);

        int count = 0;
        if (params.use_heap == FLANN_True) {
#pragma omp parallel num_threads(params.cores)
            {
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        return count;
    }

    /**
     * Find set of nearest neighbors to vec. Their indices are stored inside
     * the result object.
     *
     * Params:
     *     result = the result object in which the indices of the nearest-neighbors are stored
     *     vec = the vector for which to search the nearest neighbors
     *     maxCheck = the maximum number of restarts (in a best-bin-first manner)
     */
    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/) const
    {
        getNeighbors(vec, result);
    }

protected:
    /**
     * Builds the index
     */
    void buildIndexImpl()
    {
        tables_.resize(table_number_);
        std::vector<std::pair<size_t, ElementType*> > features;
        features.reserve(points_.size());
        for (size_t i = 0; i < points_.size(); ++i) {
            features.push_back(std::make_pair(i, points_[i]));
        }
        for (unsigned int i = 0; i < table_number_; ++i) {
            lsh::LshTable<ElementType>& table = tables_[i];
            table = lsh::LshTable<ElementType>(veclen_, key_size_);
            // Add the features to the table
            table.add(features);
        }
    }

    void freeIndex()
    {
        /* nothing to do here */
    }

private:
    /** Defines the comparator on score and index
     */
    typedef std::pair<float, unsigned int> ScoreIndexPair;
    struct SortScoreIndexPairOnSecond
    {
        bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const
        {
            return left.second < right.second;
        }
    };

    /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH
     * @param key the key we build neighbors from
     * @param lowest_index the lowest index of the bit set
     * @param level the multi-probe level we are at
     * @param xor_masks all the xor mask
     */
    void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level,
                       std::vector<lsh::BucketKey>& xor_masks)
    {
        xor_masks.push_back(key);
        if (level == 0) return;
        for (int index = lowest_index - 1; index >= 0; --index) {
            // Create a new key
            lsh::BucketKey new_key = key | (lsh::BucketKey(1) << index);
            fill_xor_mask(new_key, index, level - 1, xor_masks);
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * @param vec the feature to analyze
     * @param do_radius flag indicating if we check the radius too
     * @param radius the radius if it is a radius search
     * @param do_k flag indicating if we limit the number of nn
     * @param k_nn the number of nearest neighbors
     * @param checked_average used for debugging
     */
    void getNeighbors(const ElementType* vec, bool do_radius, float radius, bool do_k, unsigned int k_nn,
                      float& checked_average)
    {
        // NOTE(review): function-local static -- this overload is not
        // thread-safe and appears to be unused by the public API.
        static std::vector<ScoreIndexPair> score_index_heap;

        if (do_k) {
            unsigned int worst_score = std::numeric_limits<unsigned int>::max();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;
                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;
                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        // Fix: points_[i] is an ElementType* (see buildIndexImpl and the
                        // const getNeighbors overload); the previous `.point` access and
                        // passing the raw iterator into ScoreIndexPair would not compile
                        // if this template were instantiated.
                        hamming_distance = distance_(vec, points_[*training_index], veclen_);
                        if (hamming_distance < worst_score) {
                            // Insert the new element
                            score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));
                            std::push_heap(score_index_heap.begin(), score_index_heap.end());
                            if (score_index_heap.size() > (unsigned int)k_nn) {
                                // Remove the highest distance value as we have too many elements
                                std::pop_heap(score_index_heap.begin(), score_index_heap.end());
                                score_index_heap.pop_back();
                                // Keep track of the worst score
                                worst_score = score_index_heap.front().first;
                            }
                        }
                    }
                }
            }
        }
        else {
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;
                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;
                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        // Compute the Hamming distance (fix: same as above).
                        hamming_distance = distance_(vec, points_[*training_index], veclen_);
                        if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));
                    }
                }
            }
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * This is a slower version than the above as it uses the ResultSet
     * @param vec the feature to analyze
     */
    void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result) const
    {
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
        for (; table != table_end; ++table) {
            size_t key = table->getKey(vec);
            std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
            std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
            for (; xor_mask != xor_mask_end; ++xor_mask) {
                size_t sub_key = key ^ (*xor_mask);
                const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                if (bucket == 0) continue;
                // Go over each descriptor index
                std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                DistanceType hamming_distance;
                // Process the rest of the candidates
                for (; training_index < last_training_index; ++training_index) {
                    if (removed_ && removed_points_.test(*training_index)) continue;
                    // Compute the Hamming distance
                    hamming_distance = distance_(vec, points_[*training_index], veclen_);
                    result.addPoint(hamming_distance, *training_index);
                }
            }
        }
    }

    void swap(LshIndex& other)
    {
        BaseClass::swap(other);
        std::swap(tables_, other.tables_);
        std::swap(size_at_build_, other.size_at_build_);
        std::swap(table_number_, other.table_number_);
        std::swap(key_size_, other.key_size_);
        std::swap(multi_probe_level_, other.multi_probe_level_);
        std::swap(xor_masks_, other.xor_masks_);
    }

    /** The different hash tables */
    std::vector<lsh::LshTable<ElementType> > tables_;
    /** table number */
    unsigned int table_number_;
    /** key size */
    unsigned int key_size_;
    /** How far should we look for neighbors in multi-probe LSH */
    unsigned int multi_probe_level_;
    /** The XOR masks to apply to a key to get the neighboring buckets */
    std::vector<lsh::BucketKey> xor_masks_;

    USING_BASECLASS_SYMBOLS
};
}
#endif //FLANN_LSH_INDEX_H_
|
GcodeLayerThreader.h | /** Copyright (C) 2017 Ultimaker - Released under terms of the AGPLv3 License */
#ifndef GCODE_LAYER_THREADER_H
#define GCODE_LAYER_THREADER_H
#include <queue> // priority_queue
#include <functional> // function
#include <thread> // sleep
#include <chrono> // milliseconds
#include "utils/logoutput.h"
#include "utils/optional.h"
#include "utils/Lock.h"
namespace cura
{
/*!
* Producer Consumer construct for when:
* - production can occur in parallel
* - consumption must be ordered and not multithreaded
*
* A layer_nr index is passed to the item producer in order to produce the different items.
*
* Each thread does production and consumption, giving priority to consumption if some is available.
*
* If there is only one thread, it consumes every time it has produced one item.
*
* \warning This class is only adequate when the expected production time of an item is more than (n_threads - 1) times as much as the expected consumption time of an item
*/
template <typename T>
class GcodeLayerThreader
{
public:
/*!
* \param start_item_argument_index The first value with which to produce an item
* \param end_item_argument_index The last value with which to produce an item
* \param produce_item The function with which to produce an item
* \param consume_item The function with which to consume an item
* \param max_task_count The maximum number of items (being) produced without having been consumed
*/
GcodeLayerThreader(
int start_item_argument_index,
int end_item_argument_index,
const std::function<T* (int)>& produce_item,
const std::function<void (T*)>& consume_item,
const unsigned int max_task_count
);
/*!
* Produce all items and consume them.
*/
void run();
private:
/*!
* Produce an item and put it in \ref GcodeLayerThreader::produced
*
* \param item_argument_index The parameter with which to call \ref GcodeLayerThreader::produce_item
*/
void produce(int item_argument_index);
/*!
* Consume an item from \ref GcodeLayerThreader::produced
*
* \param item_idx The index into \ref GcodeLayerThreader::produced
*/
void consume(int item_idx);
/*!
* Consume if possible, otherwise
* Produce if possible, otherwise
* wait half a second
*/
void act();
/*!
* Check whether no tasks are left for a thread to pick up
*/
bool finished();
private:
// algorithm parameters
const int start_item_argument_index; //!< The first index with which \ref GcodeLayerThreader::produce_item will be called
const int end_item_argument_index; //!< The end index with which \ref GcodeLayerThreader::produce_item will not be called any more
const unsigned int item_count; //!< The number of items to produce and consume
const int max_task_count; //!< The maximum amount of items active in the system. NOTE(review): stored as int although the constructor takes unsigned int -- narrowing; confirm callers never pass values above INT_MAX.
// NOTE(review): the two callables below are held as const references; the
// std::function objects passed to the constructor must outlive this object
// (passing a temporary lambda would dangle) -- verify at the call sites.
const std::function<T* (int)>& produce_item; //!< The function to produce an item
const std::function<void (T*)>& consume_item; //!< The function to consume an item
// variables which change throughout the computation of the algorithm
std::vector<T*> produced; //!< ordered list for every item to be produced; contains pointers to produced items which aren't consumed yet; rest is nullptr
int last_produced_argument_index; //!< Counter to see which item next to produce
std::optional<int> to_be_consumed_item_idx; //!< The index into \ref GcodeLayerThreader::produced where to find the next item ready to be consumed (if any)
Lock consume_lock; //!< Lock to make sure no two threads consume at the same time
int last_consumed_idx = -1; //!< The index into \ref GcodeLayerThreader::produced for the last item consumed
// statistics
int active_task_count = 0; //!< Number of items active in this system.
};
// Constructor: records the production range [start, end), stores references to
// the producer/consumer callables, and pre-sizes the `produced` slot array
// (end - start entries, all nullptr = "not yet produced").
// last_produced_argument_index starts one below the range so the first
// pre-increment in act() yields start_item_argument_index.
template <typename T>
GcodeLayerThreader<T>::GcodeLayerThreader(
int start_item_argument_index,
int end_item_argument_index,
const std::function<T* (int)>& produce_item,
const std::function<void (T*)>& consume_item,
const unsigned int max_task_count
)
: start_item_argument_index(start_item_argument_index)
, end_item_argument_index(end_item_argument_index)
, item_count(end_item_argument_index - start_item_argument_index)
, max_task_count(max_task_count)
, produce_item(produce_item)
, consume_item(consume_item)
, last_produced_argument_index(start_item_argument_index - 1)
{
produced.resize(item_count, nullptr);
}
// Drive the whole produce/consume pipeline: every OpenMP thread repeatedly
// calls act() (consume-first, then produce, then sleep) until finished()
// reports that all items have been produced and handed off for consumption.
template <typename T>
void GcodeLayerThreader<T>::run()
{
#pragma omp parallel
{
#ifdef _OPENMP
// Only one thread logs the thread count.
#pragma omp master
log("Multithreading GcodeLayerThreader with %i threads.\n", omp_get_num_threads());
#endif // _OPENMP
while (true)
{
if (finished())
{
break;
}
act();
}
}
}
// Produce one item (outside any lock, so production runs in parallel) and
// publish it in the `produced` slot array. If the freshly produced item is
// exactly the next one in consumption order, mark it as ready to consume.
template <typename T>
void GcodeLayerThreader<T>::produce(int item_argument_index)
{
T* produced_item = produce_item(item_argument_index);
int item_idx = item_argument_index - start_item_argument_index;
// Publishing the pointer and updating the ready-marker must be atomic
// with respect to consume()'s bookkeeping.
#pragma omp critical
{
produced[item_idx] = produced_item;
if (item_idx == last_consumed_idx + 1 && item_idx < end_item_argument_index - start_item_argument_index)
{
assert(!to_be_consumed_item_idx && "the just produced item shouldn't be consumable already!");
to_be_consumed_item_idx = item_idx;
}
}
}
// Consume the item at `item_idx`. The caller must hold consume_lock, which
// serializes consumers and guarantees in-order consumption; the consume_item
// call itself runs outside the critical section so it can overlap production.
template <typename T>
void GcodeLayerThreader<T>::consume(int item_idx)
{
consume_item(produced[item_idx]);
// Clear the slot; ownership of the item was handed to consume_item.
// NOTE(review): this write happens outside the critical section --
// presumably safe because consume_lock serializes consumers and producers
// only write other slots; confirm against produce().
produced[item_idx] = nullptr;
#pragma omp critical
{
assert(item_idx == last_consumed_idx + 1);
last_consumed_idx = item_idx;
// If the next item in order is already produced, flag it as consumable.
if (last_consumed_idx + 1 < end_item_argument_index - start_item_argument_index && produced[last_consumed_idx + 1])
{
assert(!to_be_consumed_item_idx && "The next produced item shouldn't already be noted as being consumable because of the lock!");
to_be_consumed_item_idx = last_consumed_idx + 1;
}
active_task_count--;
assert(active_task_count >= 0);
}
}
// One scheduling step for a thread:
// 1. consume the next ready item if there is one and the consume lock is free;
// 2. otherwise produce the next item if the in-flight budget allows it;
// 3. otherwise sleep briefly (the system is saturated).
template <typename T>
void GcodeLayerThreader<T>::act()
{
{
int item_idx = -1;
#pragma omp critical
{
// Claim the consumable item and the consumer role atomically.
if (to_be_consumed_item_idx && consume_lock.test_lock())
{
item_idx = *to_be_consumed_item_idx;
// NOTE(review): assigning nullptr to reset relies on the project's
// optional shim in utils/optional.h; a real std::optional (declared
// at the member, see header) requires std::nullopt here -- confirm.
to_be_consumed_item_idx = nullptr;
}
}
if (item_idx >= 0)
{
consume(item_idx);
consume_lock.unlock();
return;
}
}
{
std::optional<int> item_argument_index;
#pragma omp critical
{
// Reserve a production slot while under the in-flight budget.
if (active_task_count < max_task_count)
{
item_argument_index = ++last_produced_argument_index;
active_task_count++;
}
}
// The reservation may overshoot the range end; only produce when valid.
if (item_argument_index && *item_argument_index < end_item_argument_index)
{
produce(*item_argument_index);
return;
}
}
#ifdef _OPENMP
// thread is blocked by too many items being processed
std::this_thread::sleep_for(std::chrono::milliseconds(500));
#endif
}
// A thread is done when every argument in the range has been handed out for
// production and no produced item is waiting to be consumed.
template <typename T>
bool GcodeLayerThreader<T>::finished()
{
    bool all_done;
#pragma omp critical
    {
        const bool everything_dispatched = last_produced_argument_index >= end_item_argument_index - 1;
        all_done = everything_dispatched && !to_be_consumed_item_idx;
    }
    return all_done;
}
} // namespace cura
#endif // GCODE_LAYER_THREADER_H
|
ringBuffer.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include "ringBuffer.h"
/* Round v up to the next power of two (v itself if already one).
 * Classic bit-smearing: after OR-ing in right shifts of 1,2,4,8,16 every
 * bit below the highest set bit is 1, so incrementing yields a power of
 * two. uppow2(0) returns 0. */
static uint32_t uppow2(uint32_t v) {
    v--;
    for (unsigned shift = 1; shift < 32; shift <<= 1) {
        v |= v >> shift;
    }
    v++;
    return v;
}
ringBuffer* rb_create(size_t len, size_t size) {
size_t upsize = uppow2(len);
ringBuffer* buffer = malloc(sizeof(ringBuffer) + (upsize * size));
if (buffer == NULL) { return NULL; }
memset(buffer, 0, sizeof(ringBuffer) + (upsize * size));
buffer->len = upsize;
buffer->size = size;
return buffer;
}
/* Release a buffer created by rb_create(). Safe to call with NULL
 * (free(NULL) is a no-op). The header and element storage are one
 * allocation, so a single free suffices. */
void rb_destroy(ringBuffer* r) {
free(r);
}
/*
 * Number of elements currently stored, computed as |head - tail| via a
 * branchless absolute value (assumes head/tail are signed int -- TODO
 * confirm against ringBuffer.h).
 *
 * Fix: the sign mask must replicate the sign bit across the whole int,
 * i.e. an arithmetic right shift by (width - 1) bits. The previous shift
 * count of sizeof(int)*7 (28 for a 32-bit int) only produced a correct
 * mask while |head - tail| < 2^28. (Right-shifting a negative value is
 * implementation-defined but arithmetic on all mainstream compilers.)
 */
size_t rb_fillLevel(ringBuffer* r) {
int const mask = (r->head - r->tail) >> (sizeof(int) * 8 - 1);
return ((r->head - r->tail) ^ mask) - mask;
}
/* Empty the buffer by resetting both cursors; stored bytes are not wiped. */
void rb_reset(ringBuffer* r) {
r->head = r->tail = 0;
}
/* Append one element (r->size bytes from src) at the tail.
 * Silently drops the write when the buffer is full.
 * Note: `r->tail & r->len-1` parses as `r->tail & (r->len - 1)` because
 * `-` binds tighter than `&`; len is a power of two, so this wraps the
 * index. */
void rb_write(ringBuffer* r, void* src) {
if (rb_fillLevel(r) != r->len) {
memcpy(&r->A[(r->tail & r->len-1) * r->size], src, r->size);
r->tail++;
}
}
/* Append one element at the tail, overwriting the oldest element when the
 * buffer is full (head is advanced so the buffer stays at capacity). */
void rb_overWrite(ringBuffer* r, void* src) {
memcpy(&r->A[(r->tail & r->len-1) * r->size], src, r->size);
if (rb_fillLevel(r) == r->len) {
r->head++;
}
r->tail++;
}
/* Pop the oldest element into dest (r->size bytes).
 * Leaves dest untouched when the buffer is empty. */
void rb_read(ringBuffer* r, void* dest) {
if (rb_fillLevel(r) != 0) {
memcpy(dest, &r->A[(r->head & r->len-1) * r->size], r->size);
r->head++;
}
}
/*
 * Copy every stored element, oldest first, into dest (which must hold at
 * least rb_fillLevel(r) * r->size bytes). The buffer is not consumed:
 * head and tail are left unchanged. The copies are independent, so they
 * are parallelized.
 */
void rb_dump(ringBuffer* r, void* dest) {
omp_set_num_threads(omp_get_num_procs());
/* Fix: hoist the element count -- an OpenMP canonical loop needs an
 * invariant bound, and this avoids re-evaluating the fill level on
 * every iteration. */
size_t const fill = rb_fillLevel(r);
/* Fix: arithmetic on void* is a GNU extension; use char* for portable
 * byte offsets. */
char* out = (char*)dest;
#pragma omp parallel for schedule(static)
for (size_t i = 0; i < fill; i++) {
memcpy(out + (i * r->size), &r->A[((r->head + i) & (r->len - 1)) * r->size], r->size);
}
}
cmat.c | /*
* Cheap maxtrix library for C
*
* Copyright (C) 2019 Hiroshi Kuwagata <kgt9221@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "cmat.h"
#define DEFAULT_ERROR __LINE__
#define DEFAULT_CUTOFF 1e-10
#define GROW(n) ((n * 13) / 10)
#define SHRINK(n) ((n * 10) / 13)
#define SWAP(a,b,t) do {t c; c = (a); (a) = (b); (b) = c;} while(0)
/*
 * Allocate a cmat_t of rows x cols. The element storage is one flat block
 * (tbl) plus an array of per-row pointers (row) into it. When rows == 0
 * both stay NULL. If `org` is given, its cutoff threshold is inherited;
 * otherwise DEFAULT_CUTOFF is used. On success *dst receives the object
 * and 0 is returned; on failure CMAT_ERR_NOMEM is returned and all
 * partial allocations are released.
 */
static int
alloc_object(int rows, int cols, cmat_t* org, cmat_t** dst)
{
int ret;
double* tbl;
double** row;
cmat_t* obj;
int i;
/*
* initialize
*/
ret = 0;
tbl = NULL;
row = NULL;
obj = NULL;
do {
/*
* alloc memory
*/
obj = (cmat_t*)malloc(sizeof(cmat_t));
if (obj == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
if (rows > 0) {
tbl = (double*)malloc(sizeof(double) * rows * cols);
if (tbl == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
row = (double**)malloc(sizeof(double*) * rows);
if (row == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
}
/*
* setup object
*/
/* point each row at its slice of the flat table */
for (i = 0; i < rows; i++) {
row[i] = tbl + (i * cols);
}
obj->tbl = tbl;
obj->row = row;
obj->rows = rows;
obj->cols = cols;
obj->capa = rows;
if (org) {
obj->coff = org->coff;
} else {
obj->coff = DEFAULT_CUTOFF;
}
*dst = obj;
} while (0);
/*
* post process (release partial allocations on failure)
*/
if (ret) {
if (obj) free(obj);
if (tbl) free(tbl);
if (row) free(row);
}
return ret;
}
/*
 * Release a matrix object together with its element table and row-pointer
 * array. free(NULL) is a well-defined no-op, so the previous `if (p)`
 * guards were redundant and have been removed.
 */
static void
free_object(cmat_t* ptr)
{
free(ptr->tbl);
free(ptr->row);
free(ptr);
}
/*
 * Replace the contents of `ptr` with those of `*src` in place: the old
 * storage of `ptr` is freed, the whole cmat_t struct is copied over (so
 * `ptr` takes ownership of src's tbl/row), and the now-empty shell of
 * *src is freed and nulled out. Existing pointers to `ptr` stay valid.
 */
static void
replace_object(cmat_t* ptr, cmat_t** src)
{
free(ptr->tbl);
free(ptr->row);
memcpy(ptr, *src, sizeof(cmat_t));
free(*src);
*src = NULL;
}
/*
 * Allocate a rows x cols element table: a flat double block (*dt) and an
 * array of row pointers (*dr) into it. If `src` is non-NULL its rows are
 * copied into the new table (deep copy). Returns 0 on success or
 * CMAT_ERR_NOMEM on allocation failure, in which case any partial
 * allocation is released and the outputs are untouched.
 */
static int
alloc_table(double** src, int rows, int cols, double** dt, double*** dr)
{
int ret;
double* tbl;
double** row;
int i;
ret = 0;
tbl = NULL;
row = NULL;
do {
tbl = (double*)malloc(sizeof(double) * rows * cols);
if (tbl == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
row = (double**)malloc(sizeof(double*) * rows);
if (row == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
} while (0);
if (!ret) {
/* wire up row pointers and (optionally) copy source rows */
for (i = 0; i < rows; i++) {
row[i] = tbl + (i * cols);
if (src) memcpy(row[i], src[i], sizeof(double) * cols);
}
*dt = tbl;
*dr = row;
}
if(ret) {
if (tbl) free(tbl);
if (row) free(row);
}
return ret;
}
/*
 * Format `val` into `dst` using "% f" (leading space for non-negative
 * values) and strip trailing zeros; a decimal point left bare after
 * stripping is removed too. Values with |val| <= thr are rendered as
 * " 0". Returns the length of the resulting string.
 */
static int
format(double val, char* dst, double thr)
{
int ret;
int i;
ret = 0; /* fix: keep ret defined on every path (was uninitialized) */
if (fabs(val) > thr) {
sprintf(dst, "% f", val);
/* walk backwards over the fractional digits, dropping '0's */
for (i = strlen(dst) - 1; i > 0; i--) {
switch (dst[i]) {
case '0':
/* trailing zero: keep scanning left */
break;
case '.':
/* bare decimal point: cut it as well */
dst[i + 0] = '\0';
ret = i;
goto loop_out;
default:
/* first significant character: terminate just after it */
dst[i + 1] = '\0';
ret = i + 1;
goto loop_out;
}
}
} else {
strcpy(dst, " 0");
ret = 1;
}
loop_out:
return ret;
}
/*
 * Compare two doubles after aligning their binary exponents: when both
 * share the same exponent the comparison is performed on the frexp
 * mantissas alone, which makes the tolerance scale-independent.
 *
 * NOTE(review): the function returns `fabs(f1 - f2)` truncated to int,
 * so any difference below 1.0 collapses to 0, and `coff` is only used in
 * the disabled debug block. Presumably the intent was something like
 * `return fabs(f1 - f2) > coff;` -- confirm against the callers before
 * changing.
 */
static inline int
fcmp(double f1, double f2, double coff)
{
double v1;
int e1;
double v2;
int e2;
v1 = frexp(f1, &e1);
v2 = frexp(f2, &e2);
if (e1 == e2) {
f1 = v1;
f2 = v2;
}
#if 0
if (fabs(f1 - f2) > coff) {
printf("%.20f %.20f %.20g\n", f1, f2, fabs(f1 - f2));
}
#endif
return fabs(f1 - f2);
}
/*
* http://hooktail.org/computer/index.php?LU%CA%AC%B2%F2
*/
/*
 * In-place LU decomposition (Doolittle form, partial pivoting) of the
 * sz x sz matrix viewed through the row-pointer table `row`. L and U
 * are packed into the same storage, L's unit diagonal omitted.
 * Returns the number of row exchanges performed (used by callers to
 * fix the determinant's sign); when piv is non-NULL it receives the
 * resulting row permutation.
 */
static int
lu_decomp(double** row, int sz, double thr, int* piv)
{
  int ret;
  int i;
  int j;
  int k;
  double max;
  double tmp;
  double* pi;
  double* pj;

  ret = 0;

  /* start from the identity permutation */
  if (piv) {
    for (i = 0; i < sz; i++) piv[i] = i;
  }

  for (i = 0; i < sz; i++) {
    pi = row[i];

    max = fabs(pi[i]);
    k = i;

    /* find the row at/below i holding the largest absolute pivot */
    for (j = i + 1; j < sz; j++) {
      tmp = fabs(row[j][i]);

      /*
       * Accumulated floating-point rounding error can defeat a plain
       * comparison when the two values are extremely close, so the
       * ordering is decided by thresholding the difference. The if
       * below effectively evaluates tmp > max.
       */
      if (tmp - max > thr) {
        max = tmp;
        k = j;
      }
    }

    /* swap the current row with the pivot row found above */
    if (k != i) {
      SWAP(row[i], row[k], double*);
      if (piv) SWAP(piv[i], piv[k], int);
      pi = row[i];
      ret++;
    }

    /* a zero diagonal element means the elimination for this row is
       already complete; move on to the next row */
    if (pi[i] == 0.0) continue;

    /* forwarding erase */
#pragma omp parallel for private(k,pj,tmp)
    for (j = i + 1; j < sz; j++) {
      pj = row[j];
      tmp = (pj[i] /= pi[i]);
      for (k = i + 1; k < sz; k++) {
        pj[k] -= tmp * pi[k];
      }
    }
  }

  return ret;
}
/* 2x2 determinant of [[a, b], [c, d]] */
static double
det(double a, double b, double c, double d)
{
  double result = a * d;

  result -= b * c;
  return result;
}

/* determinant of the 2x2 matrix formed by rows r1 and r2 */
static double
calc_det_dim2(double* r1, double* r2)
{
  return det(r1[0], r1[1], r2[0], r2[1]);
}

/* determinant of the 3x3 matrix formed by rows r1..r3,
   via cofactor expansion down the first column */
static double
calc_det_dim3(double* r1, double* r2, double* r3)
{
  double minor1 = det(r2[1], r2[2], r3[1], r3[2]);
  double minor2 = det(r1[1], r1[2], r3[1], r3[2]);
  double minor3 = det(r1[1], r1[2], r2[1], r2[2]);

  return (r1[0] * minor1) - (r2[0] * minor2) + (r3[0] * minor3);
}
/*
 * Determinant of an n x n matrix via LU decomposition: det(A) is the
 * product of U's diagonal, negated once per row exchange.
 *
 * row: row-pointer table of the source matrix (left untouched -- the
 *      decomposition runs on a scratch copy)
 * sz:  matrix dimension
 * thr: pivot comparison threshold forwarded to lu_decomp()
 * dst: receives the determinant on success
 *
 * Returns 0 on success, CMAT_ERR_NOMEM on allocation failure.
 *
 * FIX: the local accumulator was named `det`, shadowing the det()
 * helper function above -- renamed to `result`; the unused local `j`
 * was removed; the result is now initialized so no path can read an
 * indeterminate value.
 */
static int
calc_det(double** row, int sz, double thr, double* dst)
{
  int ret;
  double* wt;   // as "Work Table"
  double** wr;  // as "Work Rows"
  double result;
  int i;
  int n;

  result = 0.0;

  /* decompose a scratch copy so the caller's matrix is preserved */
  ret = alloc_table(row, sz, sz, &wt, &wr);
  if (!ret) {
    /* do LU decomposition; n counts the row exchanges performed */
    n = lu_decomp(wr, sz, thr, NULL);

    /* each row exchange flips the determinant's sign */
    result = (n & 1)? -1.0: 1.0;
    for (i = 0; i < sz; i++) {
      result *= wr[i][i];
    }

    free(wr);
    free(wt);

    *dst = result;
  }

  return ret;
}
/*
 * Invert the n x n matrix `src` into `dst` by Gauss-Jordan elimination
 * with partial pivoting. NOTE: src is destroyed (it is reduced to a
 * permuted identity), so callers must pass a scratch copy when the
 * original has to survive -- see cmat_inverse(). Singularity is not
 * detected here; the caller is expected to have checked the
 * determinant beforehand.
 */
static void
calc_inverse(double** src, int n, double** dst)
{
  int i;
  int j;
  int k;
  double max;
  double tmp;
  double* si;
  double* sj;
  double* di;
  double* dj;

  /* create identity matrix */
  for (i = 0; i < n; i++) {
    memset(dst[i], 0, sizeof(double) * n);
    dst[i][i] = 1.0;
  }

  /* do row reduction method */
  for (i = 0; i < n; i++) {
    si = src[i];
    di = dst[i];

    max = fabs(si[i]);
    k = i;

    /* pivoting */
    // find the row at/below i holding the largest absolute value
    for (j = i + 1; j < n; j++) {
      tmp = fabs(src[j][i]);
      if (tmp > max) {
        max = tmp;
        k = j;
      }
    }

    // swap the current row with the pivot row found above
    // (both tables are swapped so the same permutation applies)
    if (i != k) {
      SWAP(src[i], src[k], double*);
      SWAP(dst[i], dst[k], double*);
      si = src[i];
      di = dst[i];
    }

    if (si[i] == 0.0) continue;

    /* Gauss-Jordan proper: scale the pivot row to make si[i] == 1 */
    tmp = 1.0 / si[i];
    for (j = 0; j < n; j++) {
      si[j] *= tmp;
      di[j] *= tmp;
    }

    /* eliminate column i from every other row */
#pragma omp parallel for private(k,sj,dj,tmp)
    for (j = 0; j < n; j++) {
      if (i == j) continue;
      sj = src[j];
      dj = dst[j];
      tmp = sj[i];
      for (k = 0; k < n; k++) {
        sj[k] -= si[k] * tmp;
        dj[k] -= di[k] * tmp;
      }
    }
  }
}
/*
 * Sort n ints in ascending order (comb-sort-style: gap shrinks via
 * SHRINK() each pass, with the classic 9/10 -> 11 gap adjustment;
 * terminates when the gap is 1 and a pass completes with no swaps).
 *
 * BUG FIX: the swap flag `f` was read uninitialized on the first pass
 * whenever the gap was already <= 1 (e.g. n <= 1) -- undefined
 * behavior. It is now initialized before the loop, which also makes
 * the n <= 1 case exit immediately.
 */
static void
sort(int* a, size_t n)
{
  int h;   /* current gap */
  int f;   /* "a swap happened during this pass" flag */
  int i;

  h = n;
  f = 0;

  do {
    if (h > 1) {
      h = SHRINK(h);
    } else if (!f) {
      break;   /* gap is 1 and the last pass made no swaps: sorted */
    }
    f = 0;

    /* gaps of 9 and 10 are known to sort faster as 11 */
    if (h == 9 || h == 10) h = 11;

    for (i = 0; i < ((int)n - h); i++) {
      if (a[i] > a[i + h]) {
        SWAP(a[i], a[i + h], int);
        f = !0;
      }
    }
  } while (1);
}
/**
* 行列オブジェクトの生成
*
 * @param src 初期値の配列(NULLの場合はゼロ初期化)
 * @param rows 行数の指定
 * @param cols 列数の指定
 * @param dst 生成したオブジェクトの格納先のポインタ
*
* @return エラーコード(0で正常終了)
*/
/**
 * Create a matrix object.
 *
 * @param src  initial values (row-major, rows*cols entries) or NULL
 *             to zero-initialize
 * @param rows number of rows (0 is allowed)
 * @param cols number of columns (must be positive)
 * @param dst  where to store the created object
 *
 * @return error code (0 on success)
 */
int
cmat_new(double* src, int rows, int cols, cmat_t** dst)
{
  int ret;
  cmat_t* obj;

  /* argument check (size errors take precedence, as before) */
  if (rows < 0 || cols <= 0) return CMAT_ERR_BSIZE;
  if (dst == NULL) return CMAT_ERR_BADDR;

  /* allocate the object */
  obj = NULL;
  ret = alloc_object(rows, cols, NULL, &obj);
  if (ret) {
    if (obj) free_object(obj);
    return ret;
  }

  /* set initial values: copy from src, or clear to zero */
  if (src) {
    memcpy(obj->tbl, src, sizeof(double) * rows * cols);
  } else {
    memset(obj->tbl, 0, sizeof(double) * rows * cols);
  }

  *dst = obj;
  return 0;
}
/**
* 行列オブジェクトの複製
*
* @param ptr 複製元になる行列オブジェクト
* @param dst 生成したオブジェクトの格納先のポインタ
*
* @return エラーコード(0で正常終了)
*/
/**
 * Duplicate a matrix object, preserving any pivot-permuted row order.
 *
 * @param ptr  object to clone
 * @param dst  where to store the clone
 *
 * @return error code (0 on success)
 */
int
cmat_clone(cmat_t* ptr, cmat_t** dst)
{
  int ret;
  cmat_t* obj;
  int i;

  /* argument check */
  if (ptr == NULL || dst == NULL) return CMAT_ERR_BADDR;

  /* allocate the copy */
  obj = NULL;
  ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
  if (ret) return ret;

  /* copy the cell data wholesale, then rebuild the row index so each
     row points at the same offset as in the source (rows may have
     been swapped by pivoting in other operations) */
  memcpy(obj->tbl, ptr->tbl, sizeof(double) * ptr->rows * ptr->cols);
  for (i = 0; i < ptr->rows; i++) {
    obj->row[i] = obj->tbl + (ptr->row[i] - ptr->tbl);
  }

  *dst = obj;
  return 0;
}
/**
* 行列オブジェクトの削除
*
* @param dst 削除するオブジェクトのポインタ
*
* @return エラーコード(0で正常終了)
*/
/**
 * Destroy a matrix object, releasing all of its memory.
 *
 * @param ptr  object to destroy
 *
 * @return error code (0 on success)
 */
int
cmat_destroy(cmat_t* ptr)
{
  if (ptr == NULL) return CMAT_ERR_BADDR;

  free_object(ptr);
  return 0;
}
/**
* 行列オブジェクトの内容表示
*
* @param ptr 対象の行列オブジェクト
*
* @return エラーコード(0で正常終了)
*/
/**
 * Print the matrix contents to stdout.
 *
 * @param ptr    matrix object to print
 * @param label  optional heading; when given, rows are indented under it
 *
 * @return error code (0 on success)
 */
int
cmat_print(cmat_t* ptr, char* label)
{
  int r;
  int c;
  int width;
  int len;
  double* rp;   // current row pointer
  char fmt[32];
  char str[32];

  if (ptr == NULL) return CMAT_ERR_BADDR;

  /* pass 1: find the widest formatted cell to align all columns */
  width = 0;
  for (r = 0; r < ptr->rows; r++) {
    rp = ptr->row[r];
    for (c = 0; c < ptr->cols; c++) {
      len = format(rp[c], str, ptr->coff);
      if (len > width) width = len;
    }
  }
  sprintf(fmt, "%%%ds", width);

  /* pass 2: print every row padded to the common width */
  if (label != NULL) printf("%s:\n", label);
  for (r = 0; r < ptr->rows; r++) {
    rp = ptr->row[r];

    if (label != NULL) printf(" ");
    printf("[");

    for (c = 0; c < ptr->cols; c++) {
      format(rp[c], str, ptr->coff);
      printf(fmt, str);
      if (c < (ptr->cols - 1)) printf(" ");
    }

    printf(" ]\n");
  }

  return 0;
}
/**
* 行の追加
*
* @param ptr 追加対象の行列オブジェクト
* @param src 追加する行のデータ
*
* @return エラーコード(0で正常終了)
*/
/**
 * Append one row to the matrix, growing the backing store on demand.
 *
 * @param ptr  target matrix object
 * @param src  row values to append (ptr->cols entries)
 *
 * @return error code (0 on success)
 *
 * BUG FIX: previously, when realloc() of the value table succeeded but
 * the subsequent row-table allocation failed, the error path freed the
 * realloc'd block while ptr->tbl still pointed at the old block that
 * realloc had already released -- a dangling pointer and an eventual
 * double free. The realloc'd block is now committed to the object as
 * soon as the row pointers are rebased, so every exit leaves ptr in a
 * consistent, usable state.
 */
int
cmat_append(cmat_t* ptr, double* src)
{
  double* tbl;
  double** row;
  int capa;
  int i;

  /* argument check */
  if (ptr == NULL || src == NULL) return CMAT_ERR_BADDR;

  /* grow the backing store when full */
  if (ptr->capa == ptr->rows) {
    capa = (ptr->capa < 10)? 10: GROW(ptr->capa);

    tbl = (double*)realloc(ptr->tbl, sizeof(double) * capa * ptr->cols);
    if (tbl == NULL) return CMAT_ERR_NOMEM;   /* ptr->tbl still valid */

    /*
     * Rebase the existing row pointers onto the new block immediately:
     * realloc() may have released the old block, so only the stored
     * offsets (ptr->row[i] - ptr->tbl) are meaningful from here on.
     * The row order is reproduced as-is because pivoting in other
     * operations may have permuted it.
     */
    if (ptr->row) {
      for (i = 0; i < ptr->capa; i++) {
        ptr->row[i] = tbl + (ptr->row[i] - ptr->tbl);
      }
    } else {
      ptr->capa = 0;
    }
    ptr->tbl = tbl;   /* commit: object is consistent from this point */

    /* The row table cannot simply be realloc'd because the permuted
       layout above must be preserved; build a fresh one. */
    row = (double**)malloc(sizeof(double*) * capa);
    if (row == NULL) return CMAT_ERR_NOMEM;   /* ptr unchanged, valid */

    /* existing rows keep their (rebased) positions */
    for (i = 0; i < ptr->capa; i++) {
      row[i] = ptr->row[i];
    }
    /* newly added capacity gets the natural layout */
    for (i = ptr->capa; i < capa; i++) {
      row[i] = ptr->tbl + (i * ptr->cols);
    }

    free(ptr->row);   /* free(NULL) is a no-op */
    ptr->row = row;
    ptr->capa = capa;
  }

  /* store the new row */
  memcpy(ptr->row[ptr->rows++], src, sizeof(double) * ptr->cols);

  return 0;
}
/**
* 行列の和
* ptr + op → dst (dst != NULL)
* ptr + op → ptr (dst == NULL)
*
* @param ptr 対象の行列オブジェクト
 * @param op 加算する行列
 * @param dst 演算結果の格納先
*
* @return エラーコード(0で正常終了)
*/
/**
 * Element-wise matrix addition.
 *   ptr + op -> dst  (dst != NULL)
 *   ptr + op -> ptr  (dst == NULL)
 *
 * @param ptr  left operand matrix
 * @param op   right operand matrix
 * @param dst  where to store the result (NULL to add in place)
 *
 * @return error code (0 on success)
 */
int
cmat_add(cmat_t* ptr, cmat_t* op, cmat_t** dst)
{
  int ret;
  cmat_t* obj;
  int r;
  int c;
  double* d;
  double* s;
  double* o;

  /* argument check */
  if (ptr == NULL || op == NULL) return CMAT_ERR_BADDR;

  /* element-wise addition requires identical shapes */
  if (ptr->rows != op->rows || ptr->cols != op->cols) return CMAT_ERR_SHAPE;

  /* the result goes to a fresh object when dst is given, else in place */
  ret = 0;
  obj = ptr;
  if (dst) ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
  if (ret) return ret;

  /* add row by row */
#ifdef _OPENMP
#pragma omp parallel for private(d,s,o,c)
#endif /* defined(_OPENMP) */
  for (r = 0; r < ptr->rows; r++) {
    d = obj->row[r];
    s = ptr->row[r];
    o = op->row[r];

    for (c = 0; c < ptr->cols; c++) {
      d[c] = s[c] + o[c];
    }
  }

  if (dst) *dst = obj;

  return 0;
}
/**
* 行列の差
* ptr - op → dst (dst != NULL)
* ptr - op → ptr (dst == NULL)
*
* @param ptr 対象の行列オブジェクト
 * @param op 減算する行列
 * @param dst 演算結果の格納先
*
* @return エラーコード(0で正常終了)
*/
/**
 * Element-wise matrix subtraction.
 *   ptr - op -> dst  (dst != NULL)
 *   ptr - op -> ptr  (dst == NULL)
 *
 * @param ptr  left operand matrix
 * @param op   right operand matrix (the subtrahend)
 * @param dst  where to store the result (NULL to subtract in place)
 *
 * @return error code (0 on success)
 */
int
cmat_sub(cmat_t* ptr, cmat_t* op, cmat_t** dst)
{
  int ret;
  cmat_t* obj;
  int r;
  int c;
  double* d;
  double* s;
  double* o;

  /* argument check */
  if (ptr == NULL || op == NULL) return CMAT_ERR_BADDR;

  /* element-wise subtraction requires identical shapes */
  if (ptr->rows != op->rows || ptr->cols != op->cols) return CMAT_ERR_SHAPE;

  /* the result goes to a fresh object when dst is given, else in place */
  ret = 0;
  obj = ptr;
  if (dst) ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
  if (ret) return ret;

  /* subtract row by row */
#ifdef _OPENMP
#pragma omp parallel for private(d,s,o,c)
#endif /* defined(_OPENMP) */
  for (r = 0; r < ptr->rows; r++) {
    d = obj->row[r];
    s = ptr->row[r];
    o = op->row[r];

    for (c = 0; c < ptr->cols; c++) {
      d[c] = s[c] - o[c];
    }
  }

  if (dst) *dst = obj;

  return 0;
}
/**
* 行列のスカラー積
* ptr * op → dst (dst != NULL)
* ptr * op → ptr (dst == NULL)
*
* @param ptr 対象の行列オブジェクト
* @param op スカラー値
 * @param dst 演算結果の格納先
*
* @return エラーコード(0で正常終了)
*/
/**
 * Scalar multiplication of a matrix.
 *   ptr * op -> dst  (dst != NULL)
 *   ptr * op -> ptr  (dst == NULL)
 *
 * @param ptr  matrix operand
 * @param op   scalar value (NaN is rejected)
 * @param dst  where to store the result (NULL to scale in place)
 *
 * @return error code (0 on success)
 */
int
cmat_mul(cmat_t* ptr, double op, cmat_t** dst)
{
  int ret;
  cmat_t* obj;
  int r;
  int c;
  double* d;
  double* s;

  /* argument check */
  if (ptr == NULL) return CMAT_ERR_BADDR;
  if (isnan(op)) return CMAT_ERR_INVAL;

  /* the result goes to a fresh object when dst is given, else in place */
  ret = 0;
  obj = ptr;
  if (dst) ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
  if (ret) return ret;

  /* scale row by row */
#ifdef _OPENMP
#pragma omp parallel for private(d,s,c)
#endif /* defined(_OPENMP) */
  for (r = 0; r < ptr->rows; r++) {
    d = obj->row[r];
    s = ptr->row[r];

    for (c = 0; c < ptr->cols; c++) {
      d[c] = s[c] * op;
    }
  }

  if (dst) *dst = obj;

  return 0;
}
/**
* 行列の積
* ptr * op → dst (dst != NULL)
* ptr * op → ptr (dst == NULL)
*
* @param ptr 転置対象の行列オブジェクト
* @param op 積行列
* @param dst 演算結果の格納先
*
* @return エラーコード(0で正常終了)
*/
/**
 * Matrix product.
 *   ptr * op -> dst  (dst != NULL)
 *   ptr * op -> ptr  (dst == NULL)
 *
 * @param ptr  left operand matrix (m x n)
 * @param op   right operand matrix (n x p)
 * @param dst  where to store the result (NULL to overwrite ptr)
 *
 * @return error code (0 on success)
 */
int
cmat_product(cmat_t* ptr, cmat_t* op, cmat_t** dst)
{
  int ret;
  cmat_t* obj;
  int r;
  int c;
  int i;
  double* d;
  double* s;

  /* initialize */
  ret = 0;
  obj = NULL;

  /* argument check */
  do {
    if (ptr == NULL) {
      ret = CMAT_ERR_BADDR;
      break;
    }

    if (op == NULL) {
      ret = CMAT_ERR_BADDR;
      break;
    }
  } while (0);

  /* shape check: (m x n) * (n x p) requires ptr->cols == op->rows */
  if (!ret) {
    if (ptr->cols != op->rows) ret = CMAT_ERR_SHAPE;
  }

  /* a new object is always allocated: an in-place product would
     overwrite source cells that are still needed */
  if (!ret) {
    ret = alloc_object(ptr->rows, op->cols, ptr, &obj);
  }

  /* do multiply operation */
  if (!ret) {
#ifdef _OPENMP
    /* BUG FIX: the inner counter `i` was missing from the private
       list, so all threads shared one loop variable -- a data race
       that produced garbage results. Both inner counters c and i must
       be thread-private. */
#pragma omp parallel for private(d,s,c,i)
#endif /* defined(_OPENMP) */
    for (r = 0; r < ptr->rows; r++) {
      d = obj->row[r];
      s = ptr->row[r];

      for (c = 0; c < op->cols; c++) {
        d[c] = 0.0;
        for (i = 0; i < ptr->cols; i++) {
          d[c] += s[i] * op->row[i][c];
        }
      }
    }
  }

  /* put return parameter */
  if (!ret) {
    if (dst) {
      *dst = obj;
    } else {
      replace_object(ptr, &obj);
    }
  }

  /* post process */
  if (ret) {
    if (obj) free_object(obj);
  }

  return ret;
}
/**
* 行列の転置
* transpose(ptr) → dst (dst != NULL)
* transpose(ptr) → ptr (dst == NULL)
*
* @param ptr 転置対象の行列オブジェクト
* @param dst 転置結果の格納先
*
* @return エラーコード(0で正常終了)
*/
/**
 * Matrix transposition.
 *   transpose(ptr) -> dst  (dst != NULL)
 *   transpose(ptr) -> ptr  (dst == NULL)
 *
 * @param ptr  matrix to transpose
 * @param dst  where to store the result (NULL to transpose in place)
 *
 * @return error code (0 on success)
 */
int
cmat_transpose(cmat_t* ptr, cmat_t** dst)
{
  int ret;
  cmat_t* obj;
  int r;
  int c;
  double* s;

  /* argument check */
  if (ptr == NULL) return CMAT_ERR_BADDR;

  /* allocate a cols x rows result object */
  obj = NULL;
  ret = alloc_object(ptr->cols, ptr->rows, ptr, &obj);

  if (!ret) {
    /* element (r, c) of the source lands at (c, r) */
    for (r = 0; r < ptr->rows; r++) {
      s = ptr->row[r];
      for (c = 0; c < ptr->cols; c++) {
        obj->row[c][r] = s[c];
      }
    }

    if (dst) {
      *dst = obj;                  /* hand the new object to the caller */
    } else {
      replace_object(ptr, &obj);   /* swap the result into ptr */
    }
  }

  /* error cleanup */
  if (ret && obj) cmat_destroy(obj);

  return ret;
}
/**
* 逆行列の算出
* inverse(ptr) → dst (dst != NULL)
* inverse(ptr) → ptr (dst == NULL)
*
* @param ptr 対象の行列オブジェクト
* @param dst 逆行列の格納先
*
* @return エラーコード(0で正常終了)
*
* @refer http://thira.plavox.info/blog/2008/06/_c.html
* http://www.yamamo10.jp/yamamoto/lecture/2006/5E/
* Linear_eauations/gaussj_html/node2.html
*/
int
cmat_inverse(cmat_t* ptr, cmat_t** dst)
{
  int ret;
  cmat_t* obj;
  double det;
  int i;
  double* st;   // as "Source Table"
  double** sr;  // as "Source Row"
  double* dt;   // as "Destination Table"
  double** dr;  // as "Destination Row"

  /*
   * initialize
   */
  ret = 0;
  obj = NULL;
  st = NULL;
  sr = NULL;
  dt = NULL;
  dr = NULL;

  /*
   * argument check
   */
  if (ptr == NULL) ret = CMAT_ERR_BADDR;

  /*
   * check if it's a regular matrix: a (near-)zero determinant means
   * the matrix is singular and has no inverse
   */
  if (!ret) {
    ret = cmat_det(ptr, &det);
  }

  if (!ret) {
    if (fabs(det) < ptr->coff) ret = CMAT_ERR_NREGL;
  }

  /*
   * alloc work(or output) memory
   * NOTE(review): sized with ptr->capa rows rather than ptr->rows --
   * presumably to mirror the object's buffer capacity so the swap in
   * the in-place branch below stays consistent; confirm.
   */
  if (!ret) {
    ret = alloc_table(NULL, ptr->capa, ptr->cols, &st, &sr);
  }

  /*
   * alloc result object
   *
   * calc_inverse() destroys the rows it reduces, so the buffers are
   * arranged as follows:
   * - with dst: ptr is copied into the scratch table (sr), which gets
   *   reduced, and the inverse is written into the new object
   * - without dst: ptr's own rows are reduced and the inverse lands
   *   in the scratch table, which replaces ptr's buffers further down
   */
  if (!ret) {
    if (dst) {
      ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
      if (!ret) {
        for (i = 0; i < ptr->rows; i++) {
          memcpy(sr[i], ptr->row[i], sizeof(double) * ptr->cols);
        }

        dt = obj->tbl;
        dr = obj->row;
      }
    } else {
      dt = st;
      dr = sr;

      st = ptr->tbl;
      sr = ptr->row;
    }
  }

  /*
   * calculate inverse matrix (sr is consumed, dr receives the result)
   */
  if (!ret) {
    calc_inverse(sr, ptr->rows, dr);
  }

  /*
   * put return parameter
   */
  if (!ret) {
    if (dst) {
      *dst = obj;
    } else {
      /* in-place: release the old (now reduced) buffers and adopt the
         scratch buffers that hold the inverse */
      free(ptr->tbl);
      free(ptr->row);

      ptr->tbl = dt;
      ptr->row = dr;
    }
  }

  /*
   * post process (on error, only buffers not owned by ptr are freed)
   */
  if (ret) {
    if (dst) {
      if (obj) free_object(obj);
    } else {
      if (dt) free(dt);
      if (dr) free(dr);
    }
  }

  /* in the dst case st/sr are pure scratch and are always released */
  if (dst) {
    if (st) free(st);
    if (sr) free(sr);
  }

  return ret;
}
/**
* 行列のLU分解
* LU_decomp(ptr) → dst (dst != NULL)
* LU_decomp(ptr) → ptr (dst == NULL)
*
* @param ptr 対象の行列オブジェクト
* @param dst 分解行列の格納先
* @param piv 置換数列の格納先(必要ない場合はNULLを指定)
*
* @return エラーコード(0で正常終了)
*
* @note 上三角行列とした三角行列を合成した状態で出力するので注意
* 出力行列は以下のようになる。
* UUUUUU
* LUUUUU
* LLUUUU
* LLLUUU
* LLLLUU
* LLLLLU
* ※LU分解時の下三角行列の対角要素はすべて1なので省略している点に注意。
*
* @note 置換数列は置換先の数列で返される。置換行列への変換は呼び出し側で
* 行う必要がある。
*
* @refer http://thira.plavox.info/blog/2008/06/_c.html
*/
int
cmat_lu_decomp(cmat_t* ptr, cmat_t** dst, int* piv)
{
  int ret;
  cmat_t* obj;
  int i;
  double** row; // row table the decomposition will run on

  /*
   * initialize
   */
  ret = 0;
  obj = NULL;
  row = NULL;

  /*
   * argument check
   */
  if (ptr == NULL) ret = CMAT_ERR_BADDR;

  /*
   * alloc result object:
   * with dst the decomposition runs on a copy, leaving ptr untouched;
   * without dst, ptr itself is decomposed in place
   */
  if (!ret) {
    if (dst) {
      ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
      if (!ret) {
        /* copy row by row so a pivot-permuted row order in ptr is
           carried over into the copy */
        for (i = 0; i < ptr->rows; i++) {
          memcpy(obj->row[i], ptr->row[i], sizeof(double) * ptr->cols);
        }
        row = obj->row;
      }
    } else {
      row = ptr->row;
    }
  }

  /*
   * do LU decomposition (the swap count returned by lu_decomp() is
   * discarded here; callers that need the permutation pass piv)
   */
  if (!ret) {
    lu_decomp(row, ptr->rows, ptr->coff, piv);
  }

  /*
   * put return parameter
   */
  if (!ret) {
    if (dst) *dst = obj;
  }

  /*
   * post process
   */
  if (ret) {
    if (dst) {
      if (obj) free_object(obj);
    }
  }

  return ret;
}
/**
* 行列式の計算
* det(ptr) → dst
*
* @param ptr 対象の行列オブジェクト
* @param dst 算出結果の格納先のポインタ
*
* @return エラーコード(0で正常終了)
*/
/**
 * Compute the determinant of a square matrix.
 *   det(ptr) -> dst
 *
 * @param ptr  matrix object (must be square)
 * @param dst  receives the determinant
 *
 * @return error code (0 on success)
 */
int
cmat_det(cmat_t* ptr, double* dst)
{
  int ret;
  double det;

  /* argument check */
  if (ptr == NULL || dst == NULL) return CMAT_ERR_BADDR;

  /* a determinant is only defined for square matrices */
  if (ptr->rows != ptr->cols) return CMAT_ERR_SHAPE;

  /* small sizes use the closed-form expansion, larger ones LU */
  ret = 0;
  det = -1.0;

  switch (ptr->rows) {
  case 1:   // 1x1: the single element
    det = ptr->tbl[0];
    break;

  case 2:   // 2x2 closed form
    det = calc_det_dim2(ptr->row[0], ptr->row[1]);
    break;

  case 3:   // 3x3 closed form
    det = calc_det_dim3(ptr->row[0], ptr->row[1], ptr->row[2]);
    break;

  default:  // nxn via LU decomposition
    ret = calc_det(ptr->row, ptr->rows, ptr->coff, &det);
    break;
  }

  if (!ret) *dst = det;

  return ret;
}
/**
* 行列のドット積の計算
* ptr * op → dst
*
* @param ptr 対象の行列オブジェクト
* @param op オペランド
* @param dst 算出結果の格納先のポインタ
*
* @return エラーコード(0で正常終了)
*/
/**
 * Dot product of two matrices, treated as flat element sequences.
 *   ptr . op -> dst
 *
 * @param ptr  first operand
 * @param op   second operand (must hold the same number of elements)
 * @param dst  receives the scalar result
 *
 * @return error code (0 on success)
 */
int
cmat_dot(cmat_t* ptr, cmat_t* op, double* dst)
{
  double acc;
  int i;
  int n;
  int r1;
  int r2;
  double* s;
  double* o;

  /* argument check */
  if (ptr == NULL || op == NULL || dst == NULL) return CMAT_ERR_BADDR;

  /* both operands must flatten to the same length */
  if ((ptr->rows * ptr->cols) != (op->rows * op->cols)) return CMAT_ERR_SHAPE;

  /* walk both matrices in flattened row-major order; each row pointer
     is advanced whenever the index crosses that matrix's row boundary */
  acc = 0.0;
  n = ptr->rows * ptr->cols;
  r1 = 0;
  r2 = 0;

  for (i = 0; i < n; i++) {
    if (i % ptr->cols == 0) s = ptr->row[r1++];
    if (i % op->cols == 0) o = op->row[r2++];
    acc += s[i % ptr->cols] * o[i % op->cols];
  }

  *dst = acc;
  return 0;
}
#ifdef DEBUG
/**
* 最大値の取得
*
* @param ptr 対象の行列オブジェクト
* @param dst 最大値を格納する領域
*
* @return エラーコード
*
* @note 本関数では絶対値で最大の値を探査する
*/
/**
 * Find the entry with the largest magnitude.
 *
 * @param ptr  matrix object to scan
 * @param dst  receives the value found (0.0 for an empty matrix)
 *
 * @return error code (0 on success)
 *
 * @note the search compares absolute values, but the signed entry
 *       itself is returned
 */
int
cmat_abs_max(cmat_t* ptr, double* dst)
{
  double best;
  double* rp;
  int r;
  int c;

  /* argument check */
  if (ptr == NULL || dst == NULL) return CMAT_ERR_BADDR;

  /* scan every cell for the largest-magnitude entry */
  best = 0.0;
  for (r = 0; r < ptr->rows; r++) {
    rp = ptr->row[r];
    for (c = 0; c < ptr->cols; c++) {
      if (fabs(rp[c]) > fabs(best)) best = rp[c];
    }
  }

  *dst = best;
  return 0;
}
/**
* 最小値の取得
*
* @param ptr 対象の行列オブジェクト
* @param dst 最小値を格納する領域
*
* @return エラーコード
*
* @note 本関数では絶対値で最小の値を探査する
*/
/**
 * Find the entry with the smallest magnitude.
 *
 * @param ptr  matrix object to scan
 * @param dst  receives the value found (0.0 for an empty matrix)
 *
 * @return error code (0 on success)
 *
 * @note the search compares absolute values, but the signed entry
 *       itself is returned
 */
int
cmat_abs_min(cmat_t* ptr, double* dst)
{
  double best;
  double* rp;
  int r;
  int c;

  /* argument check */
  if (ptr == NULL || dst == NULL) return CMAT_ERR_BADDR;

  /* an empty matrix yields 0.0 (mirrors the original contract) */
  if (ptr->rows <= 0 || ptr->cols <= 0) {
    *dst = 0.0;
    return 0;
  }

  /* scan every cell for the smallest-magnitude entry */
  best = DBL_MAX;
  for (r = 0; r < ptr->rows; r++) {
    rp = ptr->row[r];
    for (c = 0; c < ptr->cols; c++) {
      if (fabs(rp[c]) < fabs(best)) best = rp[c];
    }
  }

  *dst = best;
  return 0;
}
/**
* 行の置換
*
* @param ptr 置換対象の行列オブジェクト
* @param _piv 置換数列
*
* @return エラーコード
*
 * @note 本関数は呼び出しオブジェクトを書き換える。
* @note 置換数列にはcmat_lu_decomp()が返す数列をそのまま使用できる。
*/
int
cmat_permute_row(cmat_t* ptr, int* _piv)
{
  int ret;
  int r;
  int* piv;
  int i;

  /*
   * initialize
   */
  ret = 0;
  piv = NULL;

  /*
   * argument check
   */
  do {
    if (ptr == NULL) {
      ret = CMAT_ERR_BADDR;
      break;
    }

    if (_piv == NULL) {
      ret = CMAT_ERR_BADDR;
      break;
    }
  } while (0);

  /*
   * alloc pivots array (a scratch copy; _piv itself is never modified)
   */
  if (!ret) {
    piv = (int*)malloc(sizeof(int) * ptr->rows);
    if (piv == NULL) ret = CMAT_ERR_NOMEM;
  }

  /*
   * check pivots: a valid permutation of row indices must read exactly
   * 0, 1, ..., rows-1 once sorted
   */
  if (!ret) {
    memcpy(piv, _piv, sizeof(int) * ptr->rows);
    sort(piv, ptr->rows);
    for (i = 0; i < ptr->rows; i++) {
      if (piv[i] != i) {
        ret = CMAT_ERR_INVAL;
        break;
      }
    }
  }

  /*
   * do permutation row: for each target position r, locate the entry
   * destined for it and swap it into place. Only the row-pointer
   * table is touched; the cell data itself never moves.
   */
  if (!ret) {
    memcpy(piv, _piv, sizeof(int) * ptr->rows);
    for (r = 0; r < ptr->rows; r++) {
      for (i = r; i < ptr->rows; i++) {
        if (piv[i] == r) break;
      }
      if (r != i) {
        SWAP(ptr->row[r], ptr->row[i], double*);
        SWAP(piv[r], piv[i], int);
      }
    }
  }

  /*
   * post process
   */
  if (piv) free(piv);

  return ret;
}
/**
* 列の置換
*
* @param ptr 置換対象の行列オブジェクト
* @param piv 置換数列
*
* @return エラーコード
*
 * @note 本関数は呼び出しオブジェクトを書き換える。
* @note 置換数列にはcmat_lu_decomp()が返す数列をそのまま使用できる。
*/
/**
 * Permute the columns of a matrix in place.
 *
 * @param ptr   matrix object to permute
 * @param _piv  permutation sequence (one entry per column; the
 *              sequence returned by cmat_lu_decomp() can be used as-is)
 *
 * @return error code (0 on success)
 *
 * BUG FIX: the validation step sorted the pivot copy with ptr->rows as
 * its length even though the array holds ptr->cols entries -- an
 * out-of-bounds access for non-square matrices; it now uses ptr->cols.
 * The application loop likewise iterated target positions up to
 * ptr->rows; column positions run up to ptr->cols. (The unused local
 * `p` was also removed.)
 */
int
cmat_permute_column(cmat_t* ptr, int* _piv)
{
  int ret;
  int r;
  int* piv;
  int i;
  int j;

  /* initialize */
  ret = 0;
  piv = NULL;

  /* argument check */
  do {
    if (ptr == NULL) {
      ret = CMAT_ERR_BADDR;
      break;
    }

    if (_piv == NULL) {
      ret = CMAT_ERR_BADDR;
      break;
    }
  } while (0);

  /* alloc a scratch copy of the pivot sequence */
  if (!ret) {
    piv = (int*)malloc(sizeof(int) * ptr->cols);
    if (piv == NULL) ret = CMAT_ERR_NOMEM;
  }

  /* validate: sorted, a permutation must read exactly 0..cols-1 */
  if (!ret) {
    memcpy(piv, _piv, sizeof(int) * ptr->cols);
    sort(piv, ptr->cols);

    for (i = 0; i < ptr->cols; i++) {
      if (piv[i] != i) {
        ret = CMAT_ERR_INVAL;
        break;
      }
    }
  }

  /* apply the permutation: for each target column position r, locate
     the column destined for it and swap it in (cell by cell, since
     columns have no pointer table) */
  if (!ret) {
    memcpy(piv, _piv, sizeof(int) * ptr->cols);

    for (r = 0; r < ptr->cols; r++) {
      for (i = r; i < ptr->cols; i++) {
        if (piv[i] == r) break;
      }

      if (r != i) {
        for (j = 0; j < ptr->rows; j++) {
          SWAP(ptr->row[j][r], ptr->row[j][i], double);
        }
        SWAP(piv[r], piv[i], int);
      }
    }
  }

  /* post process */
  if (piv) free(piv);

  return ret;
}
#endif /* defined(DEBUG) */
/**
* 行列の比較
*
* @param ptr 対象の行列オブジェクト
* @param op 比較対象の行列オブジェクト
* @param dst チェック結果先のポインタ(0で一致)
*
* @return エラーコード(0で正常終了)
*/
/**
 * Compare two matrices for (fuzzy) equality.
 *
 * @param ptr  first matrix
 * @param op   second matrix
 * @param dst  receives the result (0 when they match)
 *
 * @return error code (0 on success)
 */
int
cmat_compare(cmat_t* ptr, cmat_t* op, int* dst)
{
  int r;
  int c;
  int diff;
  double* p;
  double* o;

  /* argument check */
  if (ptr == NULL || op == NULL || dst == NULL) return CMAT_ERR_BADDR;

  /* different shapes can never match */
  diff = !0;

  if (ptr->rows == op->rows && ptr->cols == op->cols) {
    diff = 0;

    /* cell-by-cell fuzzy comparison; stop at the first mismatch */
    for (r = 0; r < ptr->rows && !diff; r++) {
      p = ptr->row[r];
      o = op->row[r];

      for (c = 0; c < ptr->cols; c++) {
        if (fcmp(p[c], o[c], ptr->coff)) {
          diff = !0;
          break;
        }
      }
    }
  }

  *dst = diff;
  return 0;
}
/**
* 行列内容のチェック
*
 * @param ptr 対象の行列オブジェクト
* @param val チェックする行列を一次元展開した配列
* @param dst チェック結果先のポインタ(0で一致)
*
* @return エラーコード(0で正常終了)
*/
/**
 * Check the matrix contents against an expected value array.
 *
 * @param ptr  matrix object to check
 * @param val  expected values, flattened in row-major order
 * @param dst  receives the result (0 when everything matches)
 *
 * @return error code (0 on success)
 */
int
cmat_check(cmat_t* ptr, double* val, int* dst)
{
  int r;
  int c;
  int diff;
  double* p;

  /* argument check */
  if (ptr == NULL || val == NULL || dst == NULL) return CMAT_ERR_BADDR;

  /* fuzzy-compare cell by cell against the flattened expectation,
     stopping at the first mismatch */
  diff = 0;
  for (r = 0; r < ptr->rows && !diff; r++) {
    p = ptr->row[r];

    for (c = 0; c < ptr->cols; c++) {
      if (fcmp(p[c], *val++, ptr->coff)) {
        diff = !0;
        break;
      }
    }
  }

  *dst = diff;
  return 0;
}
/**
* 切り捨て処理の閾値の設定
*
* @param ptr 対象の行列オブジェクト
* @param val 閾値の値
*
* @return エラーコード(0で正常終了)
*/
/**
 * Set the cutoff threshold used by comparisons and printing.
 *
 * @param ptr  matrix object to update
 * @param val  new threshold (stored as a magnitude)
 *
 * @return error code (0 on success)
 */
int
cmat_set_cutoff_threshold(cmat_t* ptr, double val)
{
  /* argument check */
  if (ptr == NULL) return CMAT_ERR_BADDR;

  /* the threshold is a magnitude: keep its absolute value */
  ptr->coff = fabs(val);
  return 0;
}
|
sudoku-omp.c | ////////////////////////////////////////////////////////////
//// Includes
////////////////////////////////////////////////////////////
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <math.h>
////////////////////////////////////////////////////////////
//// Structures
////////////////////////////////////////////////////////////
/* A sudoku board and the bookkeeping the parallel solver needs. */
struct Puzzle {
    int root_n;      /* sub-grid size: the board is root_n^2 x root_n^2 */
    int depth;       /* current search depth, used to limit task spawning */
    int n;           /* board dimension (root_n * root_n) */
    int ** matrix;   /* n x n cells; 0 marks an empty cell */
};
////////////////////////////////////////////////////////////
//// Types
////////////////////////////////////////////////////////////
typedef struct Puzzle Puzzle;
typedef int bool;
////////////////////////////////////////////////////////////
//// Defines
////////////////////////////////////////////////////////////
#define false 0
#define true 1
// get the size of elements on an array
#define NELEMS(x) (sizeof(x) / sizeof((x)[0]))
////////////////////////////////////////////////////////////
//// Global Variables
////////////////////////////////////////////////////////////
static double _start_;
static double _end_;
static int _offset_ = 10;
static bool _time_flag_ = false;
static bool _time_only_flag_ = false;
static int _tasks_in_process_ = 0;
static int _states_searched_ = 0;
////////////////////////////////////////////////////////////
//// Function Prototypes
////////////////////////////////////////////////////////////
void debug_puzzle(Puzzle * puzzle);
void print_puzzle_to_file(FILE * file,Puzzle * puzzle);
bool check_grid(Puzzle * puzzle, int row, int column, int number);
bool check_column(Puzzle * puzzle, int column, int number);
bool check_row(Puzzle * puzzle, int row, int number);
bool is_valid(Puzzle * puzzle, int row, int column, int number);
bool find_empty(Puzzle * puzzle, int * row, int * column);
bool solve(Puzzle * puzzle);
Puzzle * copy(Puzzle * puzzle);
void cleanPuzzle(Puzzle * puzzle);
void end_on_solution_found(Puzzle * puzzle);
////////////////////////////////////////////////////////////
//// Main Execution
////////////////////////////////////////////////////////////
/**
* Parallel Sudoku Solver using OpenMP
*
* @param argc Number of command line arguments.
* @param argv Array of command line arguments.
* @return Returns EXIT_SUCCESS on finishing the execution successful.
*/
/**
 * Parallel Sudoku Solver using OpenMP.
 *
 * Usage: solver <puzzle-file> [-t | -to]
 *   -t   print search statistics and elapsed time with the solution
 *   -to  print elapsed time only
 *
 * @param argc Number of command line arguments.
 * @param argv Array of command line arguments.
 * @return Returns EXIT_SUCCESS on finishing the execution successful.
 */
int main(int argc, char *argv[]){
    // start the wall-clock timer first so setup cost is included
    _start_ = omp_get_wtime();

    FILE * file_input;
    char * filename;

    // Parse command line flags.
    // NOTE(review): with 4 arguments only "-to" is recognized; a "-t"
    // flag is silently ignored in that case -- confirm whether both
    // flags were meant to combine.
    if (argc > 4){
        printf("ERROR: Too many arguments.\n");
        exit(EXIT_FAILURE);
    } else if (argc < 2) {
        printf("ERROR: Missing arguments.\n");
        exit(EXIT_FAILURE);
    } else if(argc == 3 && strcmp(argv[2], "-to") == 0) {
        _time_only_flag_ = true;
    } else if(argc == 3 && strcmp(argv[2], "-t") == 0) {
        _time_flag_ = true;
    } else if(argc > 3 && (strcmp(argv[2], "-to") == 0 || strcmp(argv[3], "-to") == 0)) {
        _time_only_flag_ = true;
    }

    filename = argv[1];

    // Open file in read mode
    if ((file_input = fopen(filename,"r")) == NULL){
        printf("ERROR: Could not open file %s\n",filename);
        exit(EXIT_FAILURE);
    }

    // First line holds root_n; the board is n x n with n = root_n^2.
    // FIX: fscanf results were previously unchecked, so a malformed
    // file led to reading uninitialized values.
    int n;
    int root_n;
    if (fscanf(file_input, "%d\n", &root_n) != 1 || root_n <= 0){
        printf("ERROR: Invalid puzzle file %s\n",filename);
        fclose(file_input);
        exit(EXIT_FAILURE);
    }
    n = root_n * root_n;

    // Initialize the puzzle data structure (N x N matrix)
    Puzzle * puzzle = malloc(sizeof(Puzzle));
    puzzle->n = n;
    puzzle->root_n = root_n;
    puzzle->depth = 1;
    puzzle->matrix = (int**) malloc(n * sizeof(int*));

    int i, j;
    for (i = 0; i < n; ++i){
        puzzle->matrix[i] = (int * )malloc(n * sizeof(int));
    }

    // Read the board; 0 denotes an empty cell
    for (i = 0; i < n; ++i){
        for (j = 0; j < n; ++j){
            if (fscanf(file_input,"%d",&puzzle->matrix[i][j]) != 1){
                printf("ERROR: Invalid puzzle file %s\n",filename);
                fclose(file_input);
                exit(EXIT_FAILURE);
            }
        }
        fscanf(file_input, "\n");
    }

    fclose(file_input);

    // Spawn the thread team; a single thread starts the solver and the
    // others pick up the OpenMP tasks it creates. On success, solve()
    // prints the solution and exits the process directly.
    #pragma omp parallel
    {
        #pragma omp single
        {
            if(!solve(puzzle)){
                // no solution was found
                _end_ = omp_get_wtime();
                if (_time_only_flag_) {
                    printf("Elapsed time: %f (s)\n", _end_ - _start_);
                } else if (_time_flag_) {
                    printf("No solution\n");
                    printf("Searched %d states in total.\n", _states_searched_);
                    printf("Elapsed time: %f (s)\n", _end_ - _start_);
                } else {
                    printf("No solution\n");
                }
            }
        }
    }

    // FIX: release the board (previously leaked); the unused locals
    // file_output, cursor, row and col were removed.
    cleanPuzzle(puzzle);

    return EXIT_SUCCESS;
}
/**
* Print the puzzle matrix.
* @param puzzle Sudoku puzzle data structure.
*/
/**
 * Print the puzzle matrix to stdout. The whole dump runs inside one
 * critical section so rows printed by concurrent tasks never interleave.
 *
 * @param puzzle Sudoku puzzle data structure (NULL is tolerated).
 */
void debug_puzzle(Puzzle * puzzle){
    #pragma omp critical
    {
        if (puzzle != NULL) {
            int r, c;

            for (r = 0; r < puzzle->n; ++r){
                for (c = 0; c < puzzle->n; ++c){
                    printf("%d ", puzzle->matrix[r][c]);
                }
                printf("\n");
            }
        }
    }
}
/**
* Print the puzzle matrix on file.
*
* @param file file data structure to print the sudoku puzzle.
* @param puzzle Sudoku puzzle data structure.
*/
/**
 * Write the puzzle matrix to an already-open stream, one row per line.
 *
 * @param file   destination stream.
 * @param puzzle Sudoku puzzle data structure.
 */
void print_puzzle_to_file(FILE * file, Puzzle * puzzle){
    int r, c;

    for (r = 0; r < puzzle->n; ++r){
        for (c = 0; c < puzzle->n; ++c){
            fprintf(file, "%d ", puzzle->matrix[r][c]);
        }
        fprintf(file, "\n");
    }
}
/**
* Check if number is already in a sub grid of the puzzle matrix.
*
* @param puzzle Sudoku puzzle data structure.
* @param row Row of the puzzle to check the value.
* @param column Column of the puzzle to check the value.
* @param number Value to compare to the one in the position given by the row and column.
* @return Returns true if the number is inside one of the sub-grids of the matrix.
*/
/**
 * Check whether a number already occurs in a sub-grid.
 *
 * @param puzzle Sudoku puzzle data structure.
 * @param row    Top row of the root_n x root_n sub-grid.
 * @param column Leftmost column of the sub-grid.
 * @param number Value to look for.
 * @return Returns true if the number occurs inside the sub-grid.
 */
bool check_grid(Puzzle * puzzle, int row, int column, int number){
    int r, c;

    for (r = 0; r < puzzle->root_n; ++r){
        for (c = 0; c < puzzle->root_n; ++c){
            if (puzzle->matrix[row + r][column + c] == number){
                return true;
            }
        }
    }
    return false;
}
/**
* Check if a number is already in a column.
*
* @param puzzle Sudoku puzzle data structure.
* @param column Column of the puzzle to check the value.
* @param number Value to compare to the one in the position given by the column.
* @return Returns true if the number is in the column.
*/
/**
 * Check whether a number already occurs in a column.
 *
 * @param puzzle Sudoku puzzle data structure.
 * @param column Column to scan.
 * @param number Value to look for.
 * @return Returns true if the number is in the column.
 */
bool check_column(Puzzle * puzzle, int column, int number){
    int r;

    for (r = puzzle->n - 1; r >= 0; --r){
        if (puzzle->matrix[r][column] == number){
            return true;
        }
    }
    return false;
}
/**
* Check if a number is already in a row.
*
* @param puzzle Sudoku puzzle data structure.
* @param row Row of the puzzle to check the value.
* @param number Value to compare to the one in the position given by the row.
* @return Returns true if the number is in the row.
*/
/**
 * Check whether a number already occurs in a row.
 *
 * @param puzzle Sudoku puzzle data structure.
 * @param row    Row to scan.
 * @param number Value to look for.
 * @return Returns true if the number is in the row.
 */
bool check_row(Puzzle * puzzle, int row, int number){
    int c;

    for (c = puzzle->n - 1; c >= 0; --c){
        if (puzzle->matrix[row][c] == number){
            return true;
        }
    }
    return false;
}
/**
* Check if a number is already in a matrix cell according to sudoku rules.
*
* @param puzzle Sudoku puzzle data structure.
* @param row Row of the puzzle to check the value.
* @param column Column of the puzzle to check the value.
* @param number Comparison value.
* @return Returns true if the number is not valid.
*/
/**
 * Check whether placing a number at (row, column) respects the sudoku
 * rules (unique in its row, column and sub-grid).
 *
 * @param puzzle Sudoku puzzle data structure.
 * @param row    Target row.
 * @param column Target column.
 * @param number Candidate value.
 * @return Returns true when the placement is legal.
 */
bool is_valid(Puzzle * puzzle, int row, int column, int number){
    /* top-left corner of the sub-grid containing (row, column) */
    int grid_row = row - row % puzzle->root_n;
    int grid_col = column - column % puzzle->root_n;

    if (check_row(puzzle, row, number)) return false;
    if (check_column(puzzle, column, number)) return false;
    return !check_grid(puzzle, grid_row, grid_col, number);
}
/**
* Find a empty cell in the sudoku puzzle.
*
* @param puzzle Sudoku puzzle data structure.
* @param row Row number reference.
* @param column Column number reference.
* @return Returns true if the puzzle has an empty position.
*/
/**
 * Locate the first empty cell (value 0) in row-major order.
 *
 * @param puzzle Sudoku puzzle data structure.
 * @param row    Receives the row of the empty cell.
 * @param column Receives the column of the empty cell.
 * @return Returns true if an empty position exists.
 */
bool find_empty(Puzzle * puzzle, int * row, int * column){
    int r, c;
    int n = puzzle->n;

    for (r = 0; r < n; r++){
        for (c = 0; c < n; c++){
            if (puzzle->matrix[r][c] == 0){
                *row = r;
                *column = c;
                return true;
            }
        }
    }

    /* no empty cell: mirror the original's out-params (both left at n) */
    *row = n;
    *column = n;
    return false;
}
/**
* Attemp to solve the sudoku puzzle using backtracking.
*
* @param puzzle Sudoku puzzle data structure.
* @return Returns true if the sudoku has a solution.
*/
/**
 * Backtracking sudoku search; shallow branches may be forked off as
 * OpenMP tasks that explore an independent copy of the board.
 *
 * @param puzzle Sudoku puzzle data structure.
 * @return Returns true if the sudoku has a solution.
 */
bool solve(Puzzle * puzzle) {
    // NOTE(review): incremented concurrently by tasks without
    // synchronization -- the count is approximate (and strictly a
    // data race); confirm whether it should be atomic.
    _states_searched_ ++;

    int row = 0, col = 0;
    int depth = puzzle->depth;

    // Check if puzzle is complete (no empty cell remains)
    if (!find_empty(puzzle, &row, &col)){
        return true;
    }

    int i;
    // try each candidate value in the first empty cell
    for (i = 1; i <= puzzle->n; ++i){
        // Check if number can be placed in a cell
        if (is_valid(puzzle, row, col, i)){
            puzzle->matrix[row][col] = i;
            puzzle->depth = depth + 1;

            bool should_copy = false;

            // Decide (under a critical section) whether to fork this
            // branch as a task: only while fewer tasks than worker
            // threads are in flight and the search is still shallow.
            #pragma omp critical
            {
                // If there are too few tasks available
                if(_tasks_in_process_ < omp_get_num_threads() - 1 &&
                    puzzle->depth < _offset_){
                    should_copy = true;
                    _tasks_in_process_ ++;
                } else {
                    should_copy = false;
                }
            }

            if(should_copy) {
                // creates a new copy of the sudoku puzzle: the task keeps
                // searching the copy while this thread rolls back below
                Puzzle * successor = copy(puzzle);

                // creates a task
                #pragma omp task default(shared) firstprivate(row, col, successor)
                {
                    // Proceeds with a copy as a task; on success the whole
                    // process exits from end_on_solution_found().
                    if (solve(successor)){
                        end_on_solution_found(successor);
                    }
                    // NOTE(review): this decrement runs outside the
                    // critical section that incremented the counter --
                    // racy against the reservation check; confirm whether
                    // it should be atomic/critical.
                    _tasks_in_process_ --;
                }
            } else { // continues the program in serial mode
                if (solve(puzzle)){
                    end_on_solution_found(puzzle);
                }
            }

            // the value on the position didn't reach the solution, so change it to zero
            puzzle->matrix[row][col] = 0;
        }
    }

    // wait for tasks spawned at this level before reporting failure
    #pragma omp taskwait
    return false;
}
/**
* Creates a new puzzle based on a puzzle received as argument.
*
* @param Puzzle Sudoku puzzle data structure to copy.
* @return Returns a puzzle data data structure if the puzzle received
* is not NULL if it is NULL then returns NULL.
*/
/**
 * Create a deep copy of a puzzle (dimensions, depth, and full matrix).
 *
 * Fix: every allocation result is now checked (the original dereferenced
 * unchecked malloc() results); on failure all partial allocations are
 * released and NULL is returned.
 *
 * @param puzzle Sudoku puzzle data structure to copy (may be NULL).
 * @return Newly allocated deep copy, or NULL if `puzzle` is NULL or an
 *         allocation fails. Caller owns the result (free with cleanPuzzle).
 */
Puzzle * copy(Puzzle * puzzle) {
if (puzzle == NULL) {
return NULL;
}
Puzzle * copy_puzzle = malloc(sizeof *copy_puzzle);
if (copy_puzzle == NULL) {
return NULL;
}
copy_puzzle->root_n = puzzle->root_n;
copy_puzzle->n = puzzle->n;
copy_puzzle->depth = puzzle->depth;
copy_puzzle->matrix = malloc(puzzle->n * sizeof(int*)); // alloc space for matrix
if (copy_puzzle->matrix == NULL) {
free(copy_puzzle);
return NULL;
}
int i,j;
// manual copy, row by row
for (i = 0; i < puzzle->n; ++i){
copy_puzzle->matrix[i] = malloc(puzzle->n * sizeof(int)); // alloc space
if (copy_puzzle->matrix[i] == NULL) {
// roll back rows allocated so far
while (--i >= 0) {
free(copy_puzzle->matrix[i]);
}
free(copy_puzzle->matrix);
free(copy_puzzle);
return NULL;
}
for (j = 0; j < puzzle->n; ++j){
copy_puzzle->matrix[i][j] = puzzle->matrix[i][j]; // copy values
}
}
return copy_puzzle;
}
/**
* Free's a Sudoku puzzle structure.
*
* @param puzzle Sudoku puzzle data structure.
*/
/**
 * Release a sudoku puzzle structure: every matrix row, the row-pointer
 * array, and the puzzle itself. A NULL argument is a no-op.
 *
 * @param puzzle Sudoku puzzle data structure (may be NULL).
 */
void cleanPuzzle (Puzzle * puzzle) {
  if (puzzle == NULL) {
    return;
  }
  int i;
  // Free each row, then the pointer table, then the struct.
  for (i = puzzle->n - 1; i >= 0; i--) {
    free(puzzle->matrix[i]);
  }
  free(puzzle->matrix);
  free(puzzle);
}
/**
* Prints the sudoku puzzle solved and the time accordingly to the flags passed as arguments.
*
* @param puzzle Sudoku puzzle data structure.
*/
/**
 * Report the solved puzzle and terminate the process.
 *
 * Output depends on the global flags: _time_only_flag_ prints just the
 * elapsed time; _time_flag_ prints the board, the state count, and the
 * elapsed time; otherwise only the board is printed.
 *
 * @param puzzle Sudoku puzzle data structure (the solved board).
 */
void end_on_solution_found(Puzzle * puzzle) {
  _end_ = omp_get_wtime();
  double elapsed = _end_ - _start_;
  if (_time_only_flag_) {
    printf("Elapsed time: %f (s)\n", elapsed);
  } else {
    debug_puzzle(puzzle);
    if (_time_flag_) {
      printf("Searched %d states in total.\n", _states_searched_);
      printf("Elapsed time: %f (s)\n", elapsed);
    }
  }
  exit(EXIT_SUCCESS);
}
GB_unop__identity_int32_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_fc64)
// op(A') function: GB (_unop_tran__identity_int32_fc64)
// C type: int32_t
// A type: GxB_FC64_t
// cast: int32_t cij = GB_cast_to_int32_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary op, casting the real part
// of each double-complex entry of A to int32. Auto-generated; logic must
// match the Generator/ template.
GrB_Info GB (_unop_apply__identity_int32_fc64)
(
int32_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/full case: every entry is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t (creal (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply; the loop body is
// supplied by the shared template GB_unop_transpose.c, which uses the
// GB_CAST_OP macro defined above. Auto-generated; do not hand-edit logic.
GrB_Info GB (_unop_tran__identity_int32_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tile_nb.c | #include <stdlib.h>
#include "tile_nb.h"
/*
 * Tiled 3x3 box-sum stencil over an m x n input `l`, writing an
 * (m-2) x (n-2) result to `output`. Appears machine-generated: the interior
 * is processed in 64x64 tiles with a per-thread 66x64 scratch buffer
 * (horizontal pass into tmp2, then vertical pass into output), and the
 * remainder rows/columns are handled by the guarded cleanup loops.
 *
 * NOTE(review): assumes m >= 3 and n >= 3 so the output extents are
 * positive — TODO confirm against callers.
 */
void tile_nb(float* l,int m,int n,float*output){
#pragma omp parallel
{
// Per-thread scratch tile: 66 rows (64 + 2 halo) x 64 columns.
float* tmp2 = (float*) calloc(1,(66) * (64) * sizeof (float));
#pragma omp for
for (int H34 = 0; H34 < (((n - (1 + 0)) - (1)) / (64)); H34++) {
for (int H46 = 0; H46 < ((((m - (1 + 0)) - (1 + 0))) / (64)); H46++) {
// Pass 1: horizontal 3-point sums of the tile into tmp2.
for (int H49 = 0; H49 < 66; H49++) {
for (int H50 = 0; H50 < 64; H50++) {
float tmp3 = 0;
float tmp4 = 0;
tmp4 = l[(((m)) * ((H34) * (64) + H49)) + (H46) * (64) + H50];
float tmp5 = 0;
tmp5 = l[(((m)) * ((H34) * (64) + H49)) + (H46) * (64) + H50 + 1];
tmp3 = tmp4 + tmp5;
float tmp6 = 0;
tmp6 = l[(((m)) * ((H34) * (64) + H49)) + (H46) * (64) + H50 + 2];
tmp2[(64) * (H49) + H50] = tmp3 + tmp6;
}
}
float* x4 = tmp2;
// Pass 2: vertical 3-point sums of tmp2 into the output tile.
for (int H52 = 0; H52 < 64; H52++) {
for (int H53 = 0; H53 < 64; H53++) {
float tmp7 = 0;
float tmp8 = 0;
tmp8 = x4[(((64)) * (H52)) + H53];
float tmp9 = 0;
tmp9 = x4[(((64)) * (H52 + 1)) + H53];
tmp7 = tmp8 + tmp9;
float tmp10 = 0;
tmp10 = x4[(((64)) * (H52 + 2)) + H53];
output[(((m - (1 + 0)) - (1 + 0))) * ((64) * (H34) + H52) + (64) * (H46) + H53] = tmp7 + tmp10;
}
}
}
// Remainder rows (m-direction) for this column of tiles: direct
// unfused 3x3 sums with bounds guards.
for (int H72 = ((((m - (1 + 0)) - (1 + 0))) / (64)); H72 < ((((m - (1 + 0)) - (1 + 0))) / (64)) + ((((((m - (1 + 0)) - (1 + 0))) % (64))) + (64) - 1 ) / (64); H72++) {
for (int H73 = 0; H73 < 64; H73++) {
for (int H74 = 0; H74 < 64; H74++) {
if ((H72) * (64) + H73 < ((m - (1 + 0)) - (1 + 0))) {
if ((H34) * (64) + H74 < (n - (1 + 0)) - (1)) {
float tmp11 = 0;
float tmp12 = 0;
float tmp13 = 0;
float tmp14 = 0;
tmp14 = l[(((m)) * (1 + (H34) * (64) + H74 - (1))) + 1 + (H72) * (64) + H73 - (1)];
float tmp15 = 0;
tmp15 = l[(((m)) * (1 + (H34) * (64) + H74 - (1))) + 1 + (H72) * (64) + H73];
tmp13 = tmp14 + tmp15;
float tmp16 = 0;
tmp16 = l[(((m)) * (1 + (H34) * (64) + H74 - (1))) + 1 + (H72) * (64) + H73 + 1];
tmp12 = tmp13 + tmp16;
float tmp17 = 0;
float tmp18 = 0;
float tmp19 = 0;
tmp19 = l[(((m)) * (1 + (H34) * (64) + H74)) + 1 + (H72) * (64) + H73 - (1)];
float tmp20 = 0;
tmp20 = l[(((m)) * (1 + (H34) * (64) + H74)) + 1 + (H72) * (64) + H73];
tmp18 = tmp19 + tmp20;
float tmp21 = 0;
tmp21 = l[(((m)) * (1 + (H34) * (64) + H74)) + 1 + (H72) * (64) + H73 + 1];
tmp17 = tmp18 + tmp21;
tmp11 = tmp12 + tmp17;
float tmp22 = 0;
float tmp23 = 0;
float tmp24 = 0;
tmp24 = l[(((m)) * (1 + (H34) * (64) + H74 + 1)) + 1 + (H72) * (64) + H73 - (1)];
float tmp25 = 0;
tmp25 = l[(((m)) * (1 + (H34) * (64) + H74 + 1)) + 1 + (H72) * (64) + H73];
tmp23 = tmp24 + tmp25;
float tmp26 = 0;
tmp26 = l[(((m)) * (1 + (H34) * (64) + H74 + 1)) + 1 + (H72) * (64) + H73 + 1];
tmp22 = tmp23 + tmp26;
output[(((m - (1 + 0)) - (1 + 0))) * ((64) * (H34) + H74) + (64) * (((H72 - (((((m - (1 + 0)) - (1 + 0))) / (64)))) + ((((m - (1 + 0)) - (1 + 0))) / (64)))) + H73] = tmp11 + tmp22;
}
}
}
}
}
}
free(tmp2);
}
// Remainder columns (n-direction), including their m-remainder corner:
// same guarded direct 3x3 sums, parallelized over the leftover tiles.
#pragma omp parallel for
for (int H75 = (((n - (1 + 0)) - (1)) / (64)); H75 < (((n - (1 + 0)) - (1)) / (64)) + (((((n - (1 + 0)) - (1)) % (64))) + (64) - 1 ) / (64); H75++) {
for (int H76 = 0; H76 < ((((m - (1 + 0)) - (1 + 0))) / (64)) + ((((((m - (1 + 0)) - (1 + 0))) % (64))) + (64) - 1 ) / (64); H76++) {
for (int H77 = 0; H77 < 64; H77++) {
for (int H78 = 0; H78 < 64; H78++) {
if ((H76) * (64) + H77 < ((m - (1 + 0)) - (1 + 0))) {
if ((H75) * (64) + H78 < (n - (1 + 0)) - (1)) {
float tmp27 = 0;
float tmp28 = 0;
float tmp29 = 0;
float tmp30 = 0;
tmp30 = l[(((m)) * (1 + (H75) * (64) + H78 - (1))) + 1 + (H76) * (64) + H77 - (1)];
float tmp31 = 0;
tmp31 = l[(((m)) * (1 + (H75) * (64) + H78 - (1))) + 1 + (H76) * (64) + H77];
tmp29 = tmp30 + tmp31;
float tmp32 = 0;
tmp32 = l[(((m)) * (1 + (H75) * (64) + H78 - (1))) + 1 + (H76) * (64) + H77 + 1];
tmp28 = tmp29 + tmp32;
float tmp33 = 0;
float tmp34 = 0;
float tmp35 = 0;
tmp35 = l[(((m)) * (1 + (H75) * (64) + H78)) + 1 + (H76) * (64) + H77 - (1)];
float tmp36 = 0;
tmp36 = l[(((m)) * (1 + (H75) * (64) + H78)) + 1 + (H76) * (64) + H77];
tmp34 = tmp35 + tmp36;
float tmp37 = 0;
tmp37 = l[(((m)) * (1 + (H75) * (64) + H78)) + 1 + (H76) * (64) + H77 + 1];
tmp33 = tmp34 + tmp37;
tmp27 = tmp28 + tmp33;
float tmp38 = 0;
float tmp39 = 0;
float tmp40 = 0;
tmp40 = l[(((m)) * (1 + (H75) * (64) + H78 + 1)) + 1 + (H76) * (64) + H77 - (1)];
float tmp41 = 0;
tmp41 = l[(((m)) * (1 + (H75) * (64) + H78 + 1)) + 1 + (H76) * (64) + H77];
tmp39 = tmp40 + tmp41;
float tmp42 = 0;
tmp42 = l[(((m)) * (1 + (H75) * (64) + H78 + 1)) + 1 + (H76) * (64) + H77 + 1];
tmp38 = tmp39 + tmp42;
output[(((m - (1 + 0)) - (1 + 0))) * ((64) * (((H75 - ((((n - (1 + 0)) - (1)) / (64)))) + (((n - (1 + 0)) - (1)) / (64)))) + H78) + (64) * (H76) + H77] = tmp27 + tmp38;
}
}
}
}
}
}
}
|
gen_matrices.c | /**
* Copyright (c) 2016, Kevin Lewi
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "gen_matrices.h"
/**
* Main function
*
* Must be called with the parameters n, p, a simulated-setup flag, and a seed, separated by spaces
*
*/
/**
 * Entry point. Requires four arguments: n, p, a simulated-setup flag, and
 * a seed.
 *
 * Fix: the original indexed argv[1..4] without checking argc, so a missing
 * argument dereferenced NULL (or read past argv). Validate first.
 */
int main(int argc, char *argv[]) {
    if (argc < 5) {
        fprintf(stderr, "Usage: %s <n> <p> <simulated> <seed>\n", argv[0]);
        return 1;
    }
    print_random_matrices_with_adj(argv[1], argv[2], argv[3], argv[4]);
    return 0;
}
/**
 * a = (b * c) mod p for n x n FLINT integer matrices: a full multiply
 * followed by an entrywise reduction of the result modulo p.
 */
void fmpz_mat_mul_modp(fmpz_mat_t a, fmpz_mat_t b, fmpz_mat_t c, int n,
fmpz_t p) {
    fmpz_mat_mul(a, b, c);
    // Reduce every entry of the product into [0, p).
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            fmpz_mod(fmpz_mat_entry(a, row, col), fmpz_mat_entry(a, row, col), p);
        }
    }
}
/**
 * Generate a random n x n matrix mod p, compute (or simulate) its
 * determinant and adjugate, verify a * adj == det * I, and print
 * det, a, and the adjugate to stdout.
 *
 * Fixes: (1) the singular-matrix error path jumped to a label that skipped
 * clearing `a`, `prod`, and `check` (memory leak); since every matrix is
 * initialized before the first goto, a single cleanup label is safe.
 * (2) `modp` was never cleared (leak).
 *
 * @param n_str Decimal string: matrix dimension.
 * @param p_str Decimal string: modulus.
 * @param simulated Decimal string: nonzero selects simulated setup.
 * @param seed Seed string for the cryptographic RNG (may be NULL).
 */
void print_random_matrices_with_adj(char *n_str, char *p_str, char *simulated,
char *seed) {
int n = atoi(n_str);
int is_simulated_setup = atoi(simulated);
cryptorand_t randstate;
cryptorand_initseed(randstate, seed ? seed : "", NULL);
fmpz_t modp;
fmpz_init(modp);
fmpz_set_str(modp, p_str, 10);
fmpz_mat_t a;
fmpz_mat_init(a, n, n);
for(int i = 0; i < n; i++) {
for(int j = 0; j < n; j++) {
fmpz_randm_crypto(fmpz_mat_entry(a, i, j), randstate, modp);
}
}
fmpz_t det;
fmpz_init(det);
fmpz_mat_t adjugate;
fmpz_mat_init(adjugate, n, n);
fmpz_mat_t prod;
fmpz_mat_init(prod, n, n);
fmpz_mat_t check;
fmpz_mat_init(check, n, n);
if(is_simulated_setup) {
/* set det and adj randomly */
fmpz_randm_crypto(det, randstate, modp);
for(int i = 0; i < n; i++) {
for(int j = 0; j < n; j++) {
fmpz_randm_crypto(fmpz_mat_entry(adjugate, i, j), randstate, modp);
}
}
} else {
fmpz_modp_matrix_det(det, a, n, modp);
if (fmpz_is_zero(det)) {
fprintf(stderr, "ERROR: Random matrix was not invertible.\n");
goto exit; /* was `goto exit_det`, which leaked a, prod and check */
}
fmpz_modp_matrix_adjugate(adjugate, a, n, modp);
fmpz_mat_transpose(adjugate, adjugate);
fmpz_mat_mul_modp(prod, a, adjugate, n, modp);
/* check that the adjugate and determinant were computed correctly */
fmpz_mat_one(check);
fmpz_mat_scalar_mul_fmpz(check, check, det);
int status = fmpz_mat_equal(prod, check);
if (status == 0) {
fprintf(stderr, "ERROR: Failed to produce the proper matrices.\n");
goto exit;
}
}
/* print the resulting values */
fmpz_fprint(stdout, det);
printf("\n");
fmpz_mat_fprint(stdout, a);
printf("\n");
fmpz_mat_transpose(adjugate, adjugate);
fmpz_mat_fprint(stdout, adjugate);
printf("\n");
exit:
/* everything is initialized before the first goto, so one label suffices */
fmpz_mat_clear(a);
fmpz_mat_clear(prod);
fmpz_mat_clear(check);
fmpz_mat_clear(adjugate);
fmpz_clear(det);
fmpz_clear(modp); /* was never cleared: leak */
cryptorand_clear(randstate);
}
/**
 * Compute det(a) mod p for an n x n FLINT matrix via Gaussian elimination
 * with row swaps, leaving `a` untouched (works on a copy). Sets det to 0
 * when the matrix is singular mod p. Requires p prime (fmpz_invmod must
 * succeed for every nonzero pivot) — presumably guaranteed by the caller;
 * verify.
 */
void fmpz_modp_matrix_det(fmpz_t det, fmpz_mat_t a, int n, fmpz_t p) {
assert(n >= 1);
// 1x1: the determinant is the single entry.
if(n == 1) {
fmpz_set(det, fmpz_mat_entry(a, 0, 0));
return;
}
// 2x2: ad - bc mod p, computed directly.
if (n == 2) {
fmpz_t tmp1;
fmpz_init(tmp1);
fmpz_mul(tmp1, fmpz_mat_entry(a,0,0), fmpz_mat_entry(a,1,1));
fmpz_mod(tmp1, tmp1, p);
fmpz_t tmp2;
fmpz_init(tmp2);
fmpz_mul(tmp2, fmpz_mat_entry(a,1,0), fmpz_mat_entry(a,0,1));
fmpz_mod(tmp2, tmp2, p);
fmpz_sub(det, tmp1, tmp2);
fmpz_mod(det, det, p);
fmpz_clear(tmp1);
fmpz_clear(tmp2);
return;
}
// General case: eliminate below the diagonal on a working copy.
fmpz_mat_t m;
fmpz_mat_init_set(m, a);
fmpz_t tmp;
fmpz_init(tmp);
fmpz_t multfactor;
fmpz_init(multfactor);
int num_swaps = 0;
for(int j = 0; j < n; j++) {
for(int i = j+1; i < n; i++) {
if(fmpz_is_zero(fmpz_mat_entry(m, j, j))) {
// find first row that isn't a zero, and swap
int h;
for(h = j+1; h < n; h++) {
if(!fmpz_is_zero(fmpz_mat_entry(m, h, j))) {
// found the row
break;
}
}
if(h == n) {
// matrix is not invertible
fmpz_set_ui(det, 0);
fmpz_clear(multfactor);
fmpz_clear(tmp);
fmpz_mat_clear(m);
return;
}
// swap row h with row j
for(int k = 0; k < n; k++) {
fmpz_set(tmp, fmpz_mat_entry(m, h, k));
fmpz_set(fmpz_mat_entry(m, h, k), fmpz_mat_entry(m, j, k));
fmpz_set(fmpz_mat_entry(m, j, k), tmp);
}
num_swaps++;
}
// multfactor = m[i][j] / m[j][j] mod p; row_i -= multfactor * row_j.
fmpz_invmod(multfactor, fmpz_mat_entry(m, j, j), p);
fmpz_mul(multfactor, multfactor, fmpz_mat_entry(m, i, j));
fmpz_mod(multfactor, multfactor, p);
// NOTE(review): parallel iterations touch distinct entries m[i][k];
// assumes FLINT entry ops on disjoint fmpz are thread-safe — confirm.
#pragma omp parallel for
for(int k = j; k < n; k++) {
fmpz_t tmp2;
fmpz_init(tmp2);
fmpz_mul(tmp2, fmpz_mat_entry(m, j, k), multfactor);
fmpz_sub(fmpz_mat_entry(m, i, k), fmpz_mat_entry(m, i, k), tmp2);
fmpz_mod(fmpz_mat_entry(m, i, k), fmpz_mat_entry(m, i, k), p);
fmpz_clear(tmp2);
}
}
}
fmpz_clear(multfactor);
fmpz_clear(tmp);
// det = product of diagonal entries, sign-flipped once per odd swap count.
fmpz_set_ui(det, 1);
for(int j = 0; j < n; j++) {
fmpz_mul(det, det, fmpz_mat_entry(m, j, j));
}
if(num_swaps % 2 == 1) {
fmpz_neg(det, det);
}
fmpz_mod(det, det, p);
fmpz_mat_clear(m);
}
/**
 * Compute the matrix of cofactors of `a` mod p into `b`: b[i][j] is
 * (-1)^(i+j) times the determinant of the minor obtained by deleting row i
 * and column j. NOTE(review): this is the cofactor matrix, not the
 * transposed (classical) adjugate — the caller transposes afterwards;
 * confirm if reused elsewhere.
 */
void fmpz_modp_matrix_adjugate(fmpz_mat_t b, fmpz_mat_t a, int n, fmpz_t p) {
if(n == 1) {
fmpz_set_ui(fmpz_mat_entry(b, 0, 0), 1);
return;
}
fmpz_t det;
fmpz_init(det);
fmpz_mat_t c;
fmpz_mat_init(c, n-1, n-1);
for (int j = 0; j < n; j++) {
for (int i = 0; i < n; i++) {
/* Form the adjoint a_ij */
// Copy `a` into (n-1)x(n-1) minor `c`, skipping row i and column j;
// i1/j1 lag behind i_iter/j_iter by one after the skipped index.
for (int i_iter = 0, i1 = 0; i_iter < n; i_iter++, i1++) {
if (i_iter == i) {
i1--;
continue;
}
for (int j_iter = 0, j1 = 0; j_iter < n; j_iter++, j1++) {
if (j_iter == j) {
j1--;
continue;
}
fmpz_set(fmpz_mat_entry(c, i1, j1), fmpz_mat_entry(a, i_iter, j_iter));
}
}
/* Calculate the determinant */
fmpz_modp_matrix_det(det, c, n-1, p);
/* Fill in the elements of the adjugate */
// Apply the checkerboard sign (-1)^(i+j).
if((i+j) % 2 == 1) {
fmpz_negmod(det, det, p);
}
fmpz_mod(det, det, p);
fmpz_set(fmpz_mat_entry(b, i, j), det);
}
}
fmpz_clear(det);
fmpz_mat_clear(c);
}
|
total.h | #pragma once
#include <gms/common/types.h>
#include <cassert>
namespace GMS::TriangleCount::Par {
/// Count the triangles of a graph in parallel.
///
/// For every ordered pair (u, v) with v a neighbor of u and u < v, the
/// size of the neighborhood intersection is accumulated; since each
/// triangle is discovered once per vertex, the sum is divided by 3.
template<class SGraph>
size_t count_total(const SGraph &graph) {
    const size_t num_vertices = graph.num_nodes();
    size_t corner_count = 0;
    #pragma omp parallel for schedule(static, 17) reduction(+:corner_count)
    for (NodeId u = 0; u < num_vertices; ++u) {
        const auto &u_neighbors = graph.out_neigh(u);
        for (NodeId v : u_neighbors) {
            if (v > u) {
                corner_count += u_neighbors.intersect_count(graph.out_neigh(v));
            }
        }
    }
    // Each triangle must have been counted exactly three times.
    assert(corner_count % 3 == 0);
    return corner_count / 3;
}
} |
gsrb.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX_THREADS 256
//------------------------------------------------------------------------------------------------------------------------------
/*
 * Single-threaded Gauss-Seidel Red-Black smoother for one box, fusing
 * `ghosts` sweeps into a wavefront over k-planes using hand-written
 * AVX/SSE intrinsics (8 doubles per iteration). Red/black selection is
 * done branch-free via either a precomputed FP multiplier array
 * (__GSRB_FP) or a 64-bit mask array XOR'd with the sweep parity.
 * NOTE(review): the leading "__" identifier is formally reserved for the
 * implementation — presumably a deliberate project convention here.
 * NOTE(review): assumes pencil/plane alignment such that the aligned
 * _mm256_load/store on phi etc. are legal — see the in-code caveat below.
 */
void __box_smooth_GSRB_multiple(box_type *box, int phi_id, int rhs_id, double a, double b, int sweep){
int pencil = box->pencil;
int plane = box->plane;
int ghosts = box->ghosts;
int DimI = box->dim.i;
int DimJ = box->dim.j;
int DimK = box->dim.k;
double h2inv = 1.0/(box->h*box->h);
// All grid pointers are offset past the leading ghost planes.
double * __restrict__ phi = box->grids[ phi_id] + ghosts*plane;
double * __restrict__ rhs = box->grids[ rhs_id] + ghosts*plane;
double * __restrict__ alpha = box->grids[__alpha ] + ghosts*plane;
double * __restrict__ beta_i = box->grids[__beta_i] + ghosts*plane;
double * __restrict__ beta_j = box->grids[__beta_j] + ghosts*plane;
double * __restrict__ beta_k = box->grids[__beta_k] + ghosts*plane;
double * __restrict__ lambda = box->grids[__lambda] + ghosts*plane;
uint64_t* __restrict__ RedBlackMask = box->RedBlack_64bMask;
// Broadcast scalar coefficients a and b/h^2 across 4-wide vectors.
const __m256d a_splat4 = _mm256_broadcast_sd(&a);
const __m256d b_h2inv_splat4 = _mm256_mul_pd(_mm256_broadcast_sd(&b),_mm256_broadcast_sd(&h2inv));
// Per-wavefront-plane ij ranges; deeper planes shrink toward the interior.
int global_ij_start[8];
int global_ij_end[8];
int ij_start[8];
int ij_end[8];
int planeInWavefront;for(planeInWavefront=0;planeInWavefront<ghosts;planeInWavefront++){
global_ij_start[planeInWavefront] = ( (1+planeInWavefront)*pencil)&~3;
global_ij_end[planeInWavefront] = ((ghosts+DimJ+ghosts-1-planeInWavefront)*pencil);
ij_start[planeInWavefront] = global_ij_start[planeInWavefront];
ij_end[planeInWavefront] = global_ij_end[planeInWavefront];
}
#if defined(__PREFETCH_NEXT_PLANE_FROM_DRAM)
double * __restrict__ DRAM_PREFETCH_POINTERS[20];
DRAM_PREFETCH_POINTERS[0] = phi+plane-pencil;
DRAM_PREFETCH_POINTERS[1] = beta_k+plane ;
DRAM_PREFETCH_POINTERS[2] = beta_j ;
DRAM_PREFETCH_POINTERS[3] = beta_i ;
DRAM_PREFETCH_POINTERS[4] = alpha ;
DRAM_PREFETCH_POINTERS[5] = rhs ;
DRAM_PREFETCH_POINTERS[6] = lambda ;
#endif
// Sweep the wavefront of ghosts planes through k.
int leadingK;
int kLow = -(ghosts-1);
int kHigh = DimK+(ghosts-1);
for(leadingK=kLow;leadingK<kHigh;leadingK++){
#if defined(__PREFETCH_NEXT_PLANE_FROM_DRAM)
int DRAM_prefetch_stream=0;
if(leadingK>=(kHigh-1))DRAM_prefetch_stream=7; // don't prefetch next plane when on last plane
int DRAM_prefetch_ijk_start = ij_start[0] + (leadingK+1)*plane;
int DRAM_prefetch_ijk_end = ij_end[0] + (leadingK+1)*plane;
int DRAM_prefetch_ijk = DRAM_prefetch_ijk_start;
#endif
for(planeInWavefront=0;planeInWavefront<ghosts;planeInWavefront++){
int k=(leadingK-planeInWavefront);
if(k>=kLow){
// Parity selects red vs black cells for this plane and sweep.
uint64_t invertMask = 0-((k^planeInWavefront^sweep^1)&0x1);
double * __restrict__ RedBlackFP = box->RedBlack_FP[(k^planeInWavefront^sweep^1)&0x1];
const __m256d invertMask4 = _mm256_broadcast_sd((double*)&invertMask);
int kplane=k*plane;
int ij = ij_start[planeInWavefront];
int _ij_end = ij_end[ planeInWavefront];
int ijk=ij+kplane;
while(ij<_ij_end){ // smooth a vector...
#if defined(__PREFETCH_NEXT_PLANE_FROM_DRAM)
#warning will attempt to prefetch the next plane from DRAM one component at a time
if(DRAM_prefetch_stream<7){
double * _base = DRAM_PREFETCH_POINTERS[DRAM_prefetch_stream] + DRAM_prefetch_ijk;
_mm_prefetch((const char*)(_base+ 0),_MM_HINT_T1);
_mm_prefetch((const char*)(_base+ 8),_MM_HINT_T1);
DRAM_prefetch_ijk+=14;
if(DRAM_prefetch_ijk>DRAM_prefetch_ijk_end){DRAM_prefetch_stream++;DRAM_prefetch_ijk=DRAM_prefetch_ijk_start;}
}
#endif
#if 1 // this version performs alligned accesses for phi+/-1, but not betai+1 or phi+/-pencil
// Build shifted phi vectors (i-1, i, i+1) from aligned 128-bit loads
// plus shuffles, then accumulate the variable-coefficient Laplacian
// (helmholtz_*) one direction at a time: i, then j, then k.
__m256d helmholtz_00;
__m256d helmholtz_04;
_mm_prefetch((const char*)( phi+ijk+2+8),_MM_HINT_T0);
const __m128d temp_00 = _mm_load_pd(phi+ijk+ -2);
const __m128d temp_02 = _mm_load_pd(phi+ijk+ 0);
const __m128d temp_01 = _mm_shuffle_pd(temp_00,temp_02,1);
const __m128d temp_04 = _mm_load_pd(phi+ijk+ 2);
const __m128d temp_06 = _mm_load_pd(phi+ijk+ 4);
const __m128d temp_03 = _mm_shuffle_pd(temp_02,temp_04,1);
const __m128d temp_05 = _mm_shuffle_pd(temp_04,temp_06,1);
const __m256d phi_00 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_02),temp_04,1);
const __m256d phi_m1 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_01),temp_03,1);
const __m256d phi_01 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_03),temp_05,1);
const __m128d temp_08 = _mm_load_pd(phi+ijk+ 6);
const __m128d temp_10 = _mm_load_pd(phi+ijk+ 8);
const __m128d temp_07 = _mm_shuffle_pd(temp_06,temp_08,1);
const __m128d temp_09 = _mm_shuffle_pd(temp_08,temp_10,1);
const __m256d phi_04 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_06),temp_08,1);
const __m256d phi_03 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_05),temp_07,1);
const __m256d phi_05 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_07),temp_09,1);
_mm_prefetch((const char*)(beta_i+ijk+1+8),_MM_HINT_T0);
helmholtz_00 = _mm256_mul_pd(_mm256_sub_pd(phi_01,phi_00),_mm256_loadu_pd(beta_i+ijk+ 1));
helmholtz_04 = _mm256_mul_pd(_mm256_sub_pd(phi_05,phi_04),_mm256_loadu_pd(beta_i+ijk+ 5));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(phi_00,phi_m1),_mm256_load_pd( beta_i+ijk+ 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(phi_04,phi_03),_mm256_load_pd( beta_i+ijk+ 4)));
_mm_prefetch((const char*)( phi+ijk-pencil+8),_MM_HINT_T0);
_mm_prefetch((const char*)( phi+ijk+pencil+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_j+ijk +8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_j+ijk+pencil+8),_MM_HINT_T0);
//careful... assumes the compiler maps _mm256_load_pd to unaligned vmovupd and not the aligned version (should be faster when pencil is a multiple of 4 doubles (32 bytes)
helmholtz_00 = _mm256_add_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+pencil+ 0), phi_00 ),_mm256_load_pd( beta_j+ijk+pencil+ 0)));
helmholtz_04 = _mm256_add_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+pencil+ 4), phi_04 ),_mm256_load_pd( beta_j+ijk+pencil+ 4)));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_load_pd( phi+ijk-pencil+ 0)),_mm256_load_pd( beta_j+ijk+ 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_load_pd( phi+ijk-pencil+ 4)),_mm256_load_pd( beta_j+ijk+ 4)));
_mm_prefetch((const char*)( phi+ijk-plane+8),_MM_HINT_T0);
_mm_prefetch((const char*)( phi+ijk+plane+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_k+ijk +8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_k+ijk+plane+8),_MM_HINT_T0);
helmholtz_00 = _mm256_add_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+ plane+ 0), phi_00 ),_mm256_load_pd( beta_k+ijk+ plane+ 0)));
helmholtz_04 = _mm256_add_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+ plane+ 4), phi_04 ),_mm256_load_pd( beta_k+ijk+ plane+ 4)));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_load_pd( phi+ijk- plane+ 0)),_mm256_load_pd( beta_k+ijk + 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_load_pd( phi+ijk- plane+ 4)),_mm256_load_pd( beta_k+ijk + 4)));
#else // this version performs unalligned accesses for phi+/-1, betai+1 and phi+/-pencil
__m256d helmholtz_00;
__m256d helmholtz_04;
//careful... assumes the compiler maps _mm256_load_pd to unaligned vmovupd and not the aligned version (should be faster when pencil is a multiple of 4 doubles (32 bytes)
_mm_prefetch((const char*)( phi+ijk+1+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_i+ijk+1+8),_MM_HINT_T0);
const __m256d phi_00 = _mm256_load_pd(phi+ijk+ 0);
const __m256d phi_04 = _mm256_load_pd(phi+ijk+ 4);
helmholtz_00 = _mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd(phi+ijk+ 1), phi_00 ),_mm256_load_pd(beta_i+ijk+ 1));
helmholtz_04 = _mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd(phi+ijk+ 5), phi_04 ),_mm256_load_pd(beta_i+ijk+ 5));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_load_pd(phi+ijk+ -1)),_mm256_load_pd(beta_i+ijk+ 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_load_pd(phi+ijk+ 3)),_mm256_load_pd(beta_i+ijk+ 4)));
_mm_prefetch((const char*)( phi+ijk-pencil+8),_MM_HINT_T0);
_mm_prefetch((const char*)( phi+ijk+pencil+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_j+ijk +8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_j+ijk+pencil+8),_MM_HINT_T0);
helmholtz_00 = _mm256_add_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(_mm256_loadu_pd( phi+ijk+pencil+ 0), phi_00 ),_mm256_loadu_pd( beta_j+ijk+pencil+ 0)));
helmholtz_04 = _mm256_add_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(_mm256_loadu_pd( phi+ijk+pencil+ 4), phi_04 ),_mm256_loadu_pd( beta_j+ijk+pencil+ 4)));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_loadu_pd( phi+ijk-pencil+ 0)),_mm256_load_pd( beta_j+ijk+ 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_loadu_pd( phi+ijk-pencil+ 4)),_mm256_load_pd( beta_j+ijk+ 4)));
_mm_prefetch((const char*)( phi+ijk-plane+8),_MM_HINT_T0);
_mm_prefetch((const char*)( phi+ijk+plane+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_k+ijk +8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_k+ijk+plane+8),_MM_HINT_T0);
helmholtz_00 = _mm256_add_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+ plane+ 0), phi_00 ),_mm256_load_pd( beta_k+ijk+ plane+ 0)));
helmholtz_04 = _mm256_add_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+ plane+ 4), phi_04 ),_mm256_load_pd( beta_k+ijk+ plane+ 4)));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_load_pd( phi+ijk- plane+ 0)),_mm256_load_pd( beta_k+ijk + 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_load_pd( phi+ijk- plane+ 4)),_mm256_load_pd( beta_k+ijk + 4)));
#endif
#ifdef __GSRB_FP
#warning GSRB using precomputed 64b FP array for Red-Black
// Red/black selection via multiply by a precomputed 0.0/1.0 array.
_mm_prefetch((const char*)( alpha+ijk+8),_MM_HINT_T0);
_mm_prefetch((const char*)( rhs+ijk+8),_MM_HINT_T0);
_mm_prefetch((const char*)( lambda+ijk+8),_MM_HINT_T0);
helmholtz_00 = _mm256_mul_pd(helmholtz_00,b_h2inv_splat4);
helmholtz_04 = _mm256_mul_pd(helmholtz_04,b_h2inv_splat4);
helmholtz_00 = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(a_splat4,_mm256_load_pd(alpha+ijk+ 0)),phi_00),helmholtz_00);
helmholtz_04 = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(a_splat4,_mm256_load_pd(alpha+ijk+ 4)),phi_04),helmholtz_04);
__m256d new_00 = _mm256_mul_pd(_mm256_load_pd(lambda+ijk+ 0),_mm256_sub_pd(helmholtz_00,_mm256_load_pd(rhs+ijk+ 0)));
__m256d new_04 = _mm256_mul_pd(_mm256_load_pd(lambda+ijk+ 4),_mm256_sub_pd(helmholtz_04,_mm256_load_pd(rhs+ijk+ 4)));
const __m256d RedBlack_00 = _mm256_load_pd(RedBlackFP+ij+ 0);
const __m256d RedBlack_04 = _mm256_load_pd(RedBlackFP+ij+ 4);
new_00 = _mm256_sub_pd(phi_00,_mm256_mul_pd(RedBlack_00,new_00));
new_04 = _mm256_sub_pd(phi_04,_mm256_mul_pd(RedBlack_04,new_04));
ij+=8;
_mm256_store_pd(phi+ijk+ 0,new_00);
_mm256_store_pd(phi+ijk+ 4,new_04);
ijk+=8;
#else
#warning GSRB using precomputed 64b integer mask array for Red-Black
// Red/black selection via blendv with an XOR-inverted bit mask.
_mm_prefetch((const char*)( alpha+ijk+8),_MM_HINT_T0);
_mm_prefetch((const char*)( rhs+ijk+8),_MM_HINT_T0);
_mm_prefetch((const char*)( lambda+ijk+8),_MM_HINT_T0);
helmholtz_00 = _mm256_mul_pd(helmholtz_00,b_h2inv_splat4);
helmholtz_04 = _mm256_mul_pd(helmholtz_04,b_h2inv_splat4);
helmholtz_00 = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(a_splat4,_mm256_load_pd(alpha+ijk+ 0)),phi_00),helmholtz_00);
helmholtz_04 = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(a_splat4,_mm256_load_pd(alpha+ijk+ 4)),phi_04),helmholtz_04);
__m256d new_00 = _mm256_mul_pd(_mm256_load_pd(lambda+ijk+ 0),_mm256_sub_pd(helmholtz_00,_mm256_load_pd(rhs+ijk+ 0)));
__m256d new_04 = _mm256_mul_pd(_mm256_load_pd(lambda+ijk+ 4),_mm256_sub_pd(helmholtz_04,_mm256_load_pd(rhs+ijk+ 4)));
new_00 = _mm256_sub_pd(phi_00,new_00);
new_04 = _mm256_sub_pd(phi_04,new_04);
const __m256d RedBlack_00 = _mm256_xor_pd(invertMask4,_mm256_load_pd((double*)(RedBlackMask+ij+ 0)));
const __m256d RedBlack_04 = _mm256_xor_pd(invertMask4,_mm256_load_pd((double*)(RedBlackMask+ij+ 4)));
ij+=8;
_mm256_store_pd(phi+ijk+ 0,_mm256_blendv_pd(phi_00,new_00,RedBlack_00));
_mm256_store_pd(phi+ijk+ 4,_mm256_blendv_pd(phi_04,new_04,RedBlack_04));
ijk+=8;
#endif
}
} // active plane
}
} // leadingK
}
void __box_smooth_GSRB_multiple_threaded(box_type *box, int phi_id, int rhs_id, double a, double b, int sweep){
volatile int64_t KPlaneFinishedByThread[MAX_THREADS];
#pragma omp parallel shared(KPlaneFinishedByThread)
{
int pencil = box->pencil;
int plane = box->plane;
int ghosts = box->ghosts;
int DimI = box->dim.i;
int DimJ = box->dim.j;
int DimK = box->dim.k;
double h2inv = 1.0/(box->h*box->h);
double * __restrict__ phi = box->grids[ phi_id] + ghosts*plane;
double * __restrict__ rhs = box->grids[ rhs_id] + ghosts*plane;
double * __restrict__ alpha = box->grids[__alpha ] + ghosts*plane;
double * __restrict__ beta_i = box->grids[__beta_i] + ghosts*plane;
double * __restrict__ beta_j = box->grids[__beta_j] + ghosts*plane;
double * __restrict__ beta_k = box->grids[__beta_k] + ghosts*plane;
double * __restrict__ lambda = box->grids[__lambda] + ghosts*plane;
uint64_t* __restrict__ RedBlackMask = box->RedBlack_64bMask;
const __m256d a_splat4 = _mm256_broadcast_sd(&a);
const __m256d b_h2inv_splat4 = _mm256_mul_pd(_mm256_broadcast_sd(&b),_mm256_broadcast_sd(&h2inv));
int id = omp_get_thread_num();
int threads = omp_get_num_threads();
// only works if (ij_end-ij_start)>=pencil;
int left = MAX( 0,id-1);
int right = MIN(threads-1,id+1);
if(ghosts==1){right=id;left=id;}
if(ghosts>1){
KPlaneFinishedByThread[id]=-100;
#pragma omp barrier
}
int global_ij_start[8];
int global_ij_end[8];
int ij_start[8];
int ij_end[8];
int planeInWavefront=0;
global_ij_start[planeInWavefront] = ( (1)*pencil)&~3;
global_ij_end[ planeInWavefront] = ((ghosts+DimJ+ghosts-1)*pencil);
int TotalUnrollings = ((global_ij_end[planeInWavefront]-global_ij_start[planeInWavefront]+8-1)/8);
ij_start[planeInWavefront] = global_ij_start[planeInWavefront] + 8*( (id )*(TotalUnrollings)/(threads));
ij_end[ planeInWavefront] = global_ij_start[planeInWavefront] + 8*( (id+1 )*(TotalUnrollings)/(threads));
if(ij_end[planeInWavefront]>global_ij_end[planeInWavefront])ij_end[planeInWavefront]=global_ij_end[planeInWavefront];
for(planeInWavefront=1;planeInWavefront<ghosts;planeInWavefront++){
ij_start[planeInWavefront] = ij_start[0];
ij_end[ planeInWavefront] = ij_end[0];
}
#if defined(__PREFETCH_NEXT_PLANE_FROM_DRAM)
double * __restrict__ DRAM_PREFETCH_POINTERS[20];
DRAM_PREFETCH_POINTERS[0] = phi+plane-pencil;
DRAM_PREFETCH_POINTERS[1] = beta_k+plane ;
DRAM_PREFETCH_POINTERS[2] = beta_j ;
DRAM_PREFETCH_POINTERS[3] = beta_i ;
DRAM_PREFETCH_POINTERS[4] = alpha ;
DRAM_PREFETCH_POINTERS[5] = rhs ;
DRAM_PREFETCH_POINTERS[6] = lambda ;
#endif
int leadingK;
int kLow = -(ghosts-1);
int kHigh = DimK+(ghosts-1);
for(leadingK=kLow;leadingK<kHigh;leadingK++){
#if defined(__PREFETCH_NEXT_PLANE_FROM_DRAM)
int DRAM_prefetch_stream=0;
if(leadingK>=(kHigh-1))DRAM_prefetch_stream=7; // don't prefetch next plane when on last plane
int DRAM_prefetch_ijk_start = ij_start[0] + (leadingK+1)*plane;
int DRAM_prefetch_ijk_end = ij_end[0] + (leadingK+1)*plane;
int DRAM_prefetch_ijk = DRAM_prefetch_ijk_start;
#endif
for(planeInWavefront=0;planeInWavefront<ghosts;planeInWavefront++){
int k=(leadingK-planeInWavefront);
if(k>=kLow){
uint64_t invertMask = 0-((k^planeInWavefront^sweep^1)&0x1);
double * __restrict__ RedBlackFP = box->RedBlack_FP[(k^planeInWavefront^sweep^1)&0x1];
const __m256d invertMask4 = _mm256_broadcast_sd((double*)&invertMask);
int kplane=k*plane;
int ij = ij_start[planeInWavefront];
int _ij_end = ij_end[ planeInWavefront];
int ijk=ij+kplane;
while(ij<_ij_end){ // smooth a vector...
#if defined(__PREFETCH_NEXT_PLANE_FROM_DRAM)
#warning will attempt to prefetch the next plane from DRAM one component at a time
if(DRAM_prefetch_stream<7){
double * _base = DRAM_PREFETCH_POINTERS[DRAM_prefetch_stream] + DRAM_prefetch_ijk;
_mm_prefetch((const char*)(_base+ 0),_MM_HINT_T1);
_mm_prefetch((const char*)(_base+ 8),_MM_HINT_T1);
DRAM_prefetch_ijk+=14;
if(DRAM_prefetch_ijk>DRAM_prefetch_ijk_end){DRAM_prefetch_stream++;DRAM_prefetch_ijk=DRAM_prefetch_ijk_start;}
}
#endif
#if 1 // this version performs alligned accesses for phi+/-1, but not betai+1 or phi+/-pencil
__m256d helmholtz_00;
__m256d helmholtz_04;
_mm_prefetch((const char*)( phi+ijk+2+8),_MM_HINT_T0);
const __m128d temp_00 = _mm_load_pd(phi+ijk+ -2);
const __m128d temp_02 = _mm_load_pd(phi+ijk+ 0);
const __m128d temp_01 = _mm_shuffle_pd(temp_00,temp_02,1);
const __m128d temp_04 = _mm_load_pd(phi+ijk+ 2);
const __m128d temp_06 = _mm_load_pd(phi+ijk+ 4);
const __m128d temp_03 = _mm_shuffle_pd(temp_02,temp_04,1);
const __m128d temp_05 = _mm_shuffle_pd(temp_04,temp_06,1);
const __m256d phi_00 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_02),temp_04,1);
const __m256d phi_m1 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_01),temp_03,1);
const __m256d phi_01 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_03),temp_05,1);
const __m128d temp_08 = _mm_load_pd(phi+ijk+ 6);
const __m128d temp_10 = _mm_load_pd(phi+ijk+ 8);
const __m128d temp_07 = _mm_shuffle_pd(temp_06,temp_08,1);
const __m128d temp_09 = _mm_shuffle_pd(temp_08,temp_10,1);
const __m256d phi_04 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_06),temp_08,1);
const __m256d phi_03 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_05),temp_07,1);
const __m256d phi_05 = _mm256_insertf128_pd(_mm256_castpd128_pd256(temp_07),temp_09,1);
_mm_prefetch((const char*)(beta_i+ijk+1+8),_MM_HINT_T0);
helmholtz_00 = _mm256_mul_pd(_mm256_sub_pd(phi_01,phi_00),_mm256_loadu_pd(beta_i+ijk+ 1));
helmholtz_04 = _mm256_mul_pd(_mm256_sub_pd(phi_05,phi_04),_mm256_loadu_pd(beta_i+ijk+ 5));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(phi_00,phi_m1),_mm256_load_pd( beta_i+ijk+ 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(phi_04,phi_03),_mm256_load_pd( beta_i+ijk+ 4)));
_mm_prefetch((const char*)( phi+ijk-pencil+8),_MM_HINT_T0);
_mm_prefetch((const char*)( phi+ijk+pencil+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_j+ijk +8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_j+ijk+pencil+8),_MM_HINT_T0);
//careful... assumes the compiler maps _mm256_load_pd to unaligned vmovupd and not the aligned version (should be faster when pencil is a multiple of 4 doubles (32 bytes)
helmholtz_00 = _mm256_add_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+pencil+ 0), phi_00 ),_mm256_load_pd( beta_j+ijk+pencil+ 0)));
helmholtz_04 = _mm256_add_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+pencil+ 4), phi_04 ),_mm256_load_pd( beta_j+ijk+pencil+ 4)));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_load_pd( phi+ijk-pencil+ 0)),_mm256_load_pd( beta_j+ijk+ 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_load_pd( phi+ijk-pencil+ 4)),_mm256_load_pd( beta_j+ijk+ 4)));
_mm_prefetch((const char*)( phi+ijk-plane+8),_MM_HINT_T0);
_mm_prefetch((const char*)( phi+ijk+plane+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_k+ijk +8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_k+ijk+plane+8),_MM_HINT_T0);
helmholtz_00 = _mm256_add_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+ plane+ 0), phi_00 ),_mm256_load_pd( beta_k+ijk+ plane+ 0)));
helmholtz_04 = _mm256_add_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+ plane+ 4), phi_04 ),_mm256_load_pd( beta_k+ijk+ plane+ 4)));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_load_pd( phi+ijk- plane+ 0)),_mm256_load_pd( beta_k+ijk + 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_load_pd( phi+ijk- plane+ 4)),_mm256_load_pd( beta_k+ijk + 4)));
#else // this version performs unalligned accesses for phi+/-1, betai+1 and phi+/-pencil
__m256d helmholtz_00;
__m256d helmholtz_04;
//careful... assumes the compiler maps _mm256_load_pd to unaligned vmovupd and not the aligned version (should be faster when pencil is a multiple of 4 doubles (32 bytes)
_mm_prefetch((const char*)( phi+ijk+1+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_i+ijk+1+8),_MM_HINT_T0);
const __m256d phi_00 = _mm256_load_pd(phi+ijk+ 0);
const __m256d phi_04 = _mm256_load_pd(phi+ijk+ 4);
helmholtz_00 = _mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd(phi+ijk+ 1), phi_00 ),_mm256_load_pd(beta_i+ijk+ 1));
helmholtz_04 = _mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd(phi+ijk+ 5), phi_04 ),_mm256_load_pd(beta_i+ijk+ 5));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_load_pd(phi+ijk+ -1)),_mm256_load_pd(beta_i+ijk+ 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_load_pd(phi+ijk+ 3)),_mm256_load_pd(beta_i+ijk+ 4)));
_mm_prefetch((const char*)( phi+ijk-pencil+8),_MM_HINT_T0);
_mm_prefetch((const char*)( phi+ijk+pencil+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_j+ijk +8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_j+ijk+pencil+8),_MM_HINT_T0);
helmholtz_00 = _mm256_add_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(_mm256_loadu_pd( phi+ijk+pencil+ 0), phi_00 ),_mm256_loadu_pd( beta_j+ijk+pencil+ 0)));
helmholtz_04 = _mm256_add_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(_mm256_loadu_pd( phi+ijk+pencil+ 4), phi_04 ),_mm256_loadu_pd( beta_j+ijk+pencil+ 4)));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_loadu_pd( phi+ijk-pencil+ 0)),_mm256_load_pd( beta_j+ijk+ 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_loadu_pd( phi+ijk-pencil+ 4)),_mm256_load_pd( beta_j+ijk+ 4)));
_mm_prefetch((const char*)( phi+ijk-plane+8),_MM_HINT_T0);
_mm_prefetch((const char*)( phi+ijk+plane+8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_k+ijk +8),_MM_HINT_T0);
_mm_prefetch((const char*)(beta_k+ijk+plane+8),_MM_HINT_T0);
helmholtz_00 = _mm256_add_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+ plane+ 0), phi_00 ),_mm256_load_pd( beta_k+ijk+ plane+ 0)));
helmholtz_04 = _mm256_add_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd(_mm256_load_pd( phi+ijk+ plane+ 4), phi_04 ),_mm256_load_pd( beta_k+ijk+ plane+ 4)));
helmholtz_00 = _mm256_sub_pd(helmholtz_00,_mm256_mul_pd(_mm256_sub_pd( phi_00 ,_mm256_load_pd( phi+ijk- plane+ 0)),_mm256_load_pd( beta_k+ijk + 0)));
helmholtz_04 = _mm256_sub_pd(helmholtz_04,_mm256_mul_pd(_mm256_sub_pd( phi_04 ,_mm256_load_pd( phi+ijk- plane+ 4)),_mm256_load_pd( beta_k+ijk + 4)));
#endif
#ifdef __GSRB_FP
#warning GSRB using precomputed 64b FP array for Red-Black
_mm_prefetch((const char*)( alpha+ijk+8),_MM_HINT_T0);
_mm_prefetch((const char*)( rhs+ijk+8),_MM_HINT_T0);
_mm_prefetch((const char*)( lambda+ijk+8),_MM_HINT_T0);
helmholtz_00 = _mm256_mul_pd(helmholtz_00,b_h2inv_splat4);
helmholtz_04 = _mm256_mul_pd(helmholtz_04,b_h2inv_splat4);
helmholtz_00 = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(a_splat4,_mm256_load_pd(alpha+ijk+ 0)),phi_00),helmholtz_00);
helmholtz_04 = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(a_splat4,_mm256_load_pd(alpha+ijk+ 4)),phi_04),helmholtz_04);
__m256d new_00 = _mm256_mul_pd(_mm256_load_pd(lambda+ijk+ 0),_mm256_sub_pd(helmholtz_00,_mm256_load_pd(rhs+ijk+ 0)));
__m256d new_04 = _mm256_mul_pd(_mm256_load_pd(lambda+ijk+ 4),_mm256_sub_pd(helmholtz_04,_mm256_load_pd(rhs+ijk+ 4)));
const __m256d RedBlack_00 = _mm256_load_pd(RedBlackFP+ij+ 0);
const __m256d RedBlack_04 = _mm256_load_pd(RedBlackFP+ij+ 4);
new_00 = _mm256_sub_pd(phi_00,_mm256_mul_pd(RedBlack_00,new_00));
new_04 = _mm256_sub_pd(phi_04,_mm256_mul_pd(RedBlack_04,new_04));
ij+=8;
_mm256_store_pd(phi+ijk+ 0,new_00);
_mm256_store_pd(phi+ijk+ 4,new_04);
ijk+=8;
#else
#warning GSRB using precomputed 64b integer mask array for Red-Black
_mm_prefetch((const char*)( alpha+ijk+8),_MM_HINT_T0);
_mm_prefetch((const char*)( rhs+ijk+8),_MM_HINT_T0);
_mm_prefetch((const char*)( lambda+ijk+8),_MM_HINT_T0);
helmholtz_00 = _mm256_mul_pd(helmholtz_00,b_h2inv_splat4);
helmholtz_04 = _mm256_mul_pd(helmholtz_04,b_h2inv_splat4);
helmholtz_00 = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(a_splat4,_mm256_load_pd(alpha+ijk+ 0)),phi_00),helmholtz_00);
helmholtz_04 = _mm256_sub_pd(_mm256_mul_pd(_mm256_mul_pd(a_splat4,_mm256_load_pd(alpha+ijk+ 4)),phi_04),helmholtz_04);
__m256d new_00 = _mm256_mul_pd(_mm256_load_pd(lambda+ijk+ 0),_mm256_sub_pd(helmholtz_00,_mm256_load_pd(rhs+ijk+ 0)));
__m256d new_04 = _mm256_mul_pd(_mm256_load_pd(lambda+ijk+ 4),_mm256_sub_pd(helmholtz_04,_mm256_load_pd(rhs+ijk+ 4)));
new_00 = _mm256_sub_pd(phi_00,new_00);
new_04 = _mm256_sub_pd(phi_04,new_04);
const __m256d RedBlack_00 = _mm256_xor_pd(invertMask4,_mm256_load_pd((double*)(RedBlackMask+ij+ 0)));
const __m256d RedBlack_04 = _mm256_xor_pd(invertMask4,_mm256_load_pd((double*)(RedBlackMask+ij+ 4)));
ij+=8;
_mm256_store_pd(phi+ijk+ 0,_mm256_blendv_pd(phi_00,new_00,RedBlack_00));
_mm256_store_pd(phi+ijk+ 4,_mm256_blendv_pd(phi_04,new_04,RedBlack_04));
ijk+=8;
#endif
}
} // active plane
}
if(ghosts>1){
KPlaneFinishedByThread[id]=leadingK;
while( (KPlaneFinishedByThread[left ]<leadingK) || (KPlaneFinishedByThread[right]<leadingK) ){_mm_pause();}; // pause() in case HT is in use...
}
} // leadingK
} // omp parallel region
}
//==================================================================================================
void smooth(domain_type * domain, int level, int phi_id, int rhs_id, double a, double b){
  // Apply numSmooths GSRB smooths to grid phi_id at this level using rhs_id as the
  // right-hand side; a and b are the Helmholtz operator coefficients.
  // When ghosts>1 the smoother is communication-avoiding: one (deep) boundary
  // exchange services 'ghosts' consecutive sub-smooths, so the loop advances by ghosts.
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int box,s;
  int ghosts = domain->ghosts;
  // if communication-avoiding, need RHS for stencils in ghost zones
  if(ghosts>1)exchange_boundary(domain,level,rhs_id,1,1,1);
  for(s=0;s<numSmooths;s+=ghosts){
    exchange_boundary(domain,level,phi_id,1,ghosts>1,ghosts>1); // corners/edges if doing communication-avoiding...
    if(domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize){
      // large boxes: all threads collaborate within each box, one box at a time
      uint64_t _timeStart = CycleTime();
      for(box=0;box<domain->subdomains_per_rank;box++){__box_smooth_GSRB_multiple_threaded(&domain->subdomains[box].levels[level],phi_id,rhs_id,a,b,s);}
      domain->cycles.smooth[level] += (uint64_t)(CycleTime()-_timeStart);
    }else{
      // small boxes: one thread per box, ghosts communication-avoiding smooths each
      uint64_t _timeStart = CycleTime();
      #pragma omp parallel for private(box)
      for(box=0;box<domain->subdomains_per_rank;box++){__box_smooth_GSRB_multiple(&domain->subdomains[box].levels[level],phi_id,rhs_id,a,b,s);}
      domain->cycles.smooth[level] += (uint64_t)(CycleTime()-_timeStart);
    }
  }
}
//==================================================================================================
|
core_clanhe.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlanhe.c, normal z -> c, Fri Sep 28 17:38:21 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
/******************************************************************************/
__attribute__((weak))
void plasma_core_clanhe(plasma_enum_t norm, plasma_enum_t uplo,
                        int n,
                        const plasma_complex32_t *A, int lda,
                        float *work, float *value)
{
    // Sequential kernel: compute the requested norm of the Hermitian n-by-n
    // tile A (column-major, leading dimension lda) via LAPACKE, storing the
    // scalar result in *value.  'work' is caller-provided scratch space sized
    // as LAPACKE_clanhe_work requires for this norm.
    char lapack_norm = lapack_const(norm);
    char lapack_uplo = lapack_const(uplo);
    *value = LAPACKE_clanhe_work(LAPACK_COL_MAJOR, lapack_norm, lapack_uplo,
                                 n, A, lda, work);
}
/******************************************************************************/
// Asynchronous (OpenMP task) wrapper around plasma_core_clanhe.
// The task reads the tile A (lda*n elements) and writes the single scalar
// value[0].  'work' is scratch space for the underlying LAPACKE call.
// 'request' is accepted for API uniformity with the other omp kernels;
// errors are communicated through 'sequence'.
void plasma_core_omp_clanhe(plasma_enum_t norm, plasma_enum_t uplo,
int n,
const plasma_complex32_t *A, int lda,
float *work, float *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:value[0:1])
{
// Skip the computation if an earlier task in this sequence already failed.
if (sequence->status == PlasmaSuccess)
plasma_core_clanhe(norm, uplo, n, A, lda, work, value);
}
}
/******************************************************************************/
// Auxiliary one-norm / infinity-norm kernel for a Hermitian tile.
// Accumulates per-row/column sums of absolute values of the n-by-n Hermitian
// tile A (column-major, leading dimension lda) into value[0..n-1], visiting
// only the stored triangle and crediting both the row and the column of each
// off-diagonal entry (the mirrored entry has the same modulus).  Diagonal
// entries of a Hermitian matrix are real, so only their real part contributes.
// 'request' is accepted for API uniformity; errors propagate via 'sequence'.
void plasma_core_omp_clanhe_aux(plasma_enum_t norm, plasma_enum_t uplo,
                                int n,
                                const plasma_complex32_t *A, int lda,
                                float *value,
                                plasma_sequence_t *sequence, plasma_request_t *request)
{
    switch (norm) {
    case PlasmaOneNorm:
    case PlasmaInfNorm:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                if (uplo == PlasmaUpper) {
                    for (int i = 0; i < n; i++)
                        value[i] = 0.0f;

                    for (int j = 0; j < n; j++) {
                        for (int i = 0; i < j; i++) {
                            value[i] += cabsf(A[lda*j+i]);
                            value[j] += cabsf(A[lda*j+i]);
                        }
                        // single-precision crealf matches cabsf/fabsf above
                        value[j] += fabsf(crealf(A[lda*j+j]));
                    }
                }
                else { // PlasmaLower
                    for (int i = 0; i < n; i++)
                        value[i] = 0.0f;

                    for (int j = 0; j < n; j++) {
                        value[j] += fabsf(crealf(A[lda*j+j]));

                        for (int i = j+1; i < n; i++) {
                            value[i] += cabsf(A[lda*j+i]);
                            value[j] += cabsf(A[lda*j+i]);
                        }
                    }
                }
            }
        }
        break;

    default:
        // Max and Frobenius norms have no per-row auxiliary form here;
        // intentionally a no-op for any other norm value.
        break;
    }
}
|
convolution_sgemm_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
extern void im2col_sgemm_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
extern void im2col_sgemm_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __SSE2__
if (inch >= 4)
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#endif
}
else
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#endif
}
{
#if __AVX2__
int remain_size_start = 0;
int nn_size = size >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
signed char* tmpptr = tmp.channel(i / 4);
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr[8] = img0[2];
tmpptr[9] = img1[2];
tmpptr[10] = img2[2];
tmpptr[11] = img3[2];
tmpptr[12] = img0[3];
tmpptr[13] = img1[3];
tmpptr[14] = img2[3];
tmpptr[15] = img3[3];
tmpptr += 16;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
signed char* tmpptr = tmp.channel(i / 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#else // __SSE2__
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
signed char* tmpptr = tmp.channel(i);
int q = 0;
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#endif // __SSE2__
int nn_outch = 0;
int remain_outch_start = 0;
#if __SSE2__
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
__m256i _sum00_12 = _mm256_setzero_si256();
__m256i _sum20_32 = _mm256_setzero_si256();
if (nn4 > 0)
{
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum30_22 = _mm256_setzero_si256();
#else
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum01_13 = _mm256_setzero_si256();
__m256i _sum11_03 = _mm256_setzero_si256();
__m256i _sum30_22 = _mm256_setzero_si256();
__m256i _sum21_33 = _mm256_setzero_si256();
__m256i _sum31_23 = _mm256_setzero_si256();
#endif
int j = 0;
for (; j < nn4; j++)
{
__m128i _val0123 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val0123_16 = _mm256_cvtepi8_epi16(_val0123);
__m256i _val01_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(1, 1, 0, 0));
__m256i _val23_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(3, 3, 2, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
__m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
_sum20_32 = _mm256_dpwssd_epi32(_sum20_32, _val23_16, _w01_16);
_sum30_22 = _mm256_dpwssd_epi32(_sum30_22, _val32_16, _w01_16);
#else
__m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
__m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
__m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
__m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
__m256i _sl20_31 = _mm256_mullo_epi16(_val23_16, _w01_16);
__m256i _sh20_31 = _mm256_mulhi_epi16(_val23_16, _w01_16);
__m256i _sl30_21 = _mm256_mullo_epi16(_val32_16, _w01_16);
__m256i _sh30_21 = _mm256_mulhi_epi16(_val32_16, _w01_16);
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
_sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
_sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
_sum20_32 = _mm256_add_epi32(_sum20_32, _mm256_unpacklo_epi16(_sl20_31, _sh20_31));
_sum30_22 = _mm256_add_epi32(_sum30_22, _mm256_unpacklo_epi16(_sl30_21, _sh30_21));
_sum21_33 = _mm256_add_epi32(_sum21_33, _mm256_unpackhi_epi16(_sl20_31, _sh20_31));
_sum31_23 = _mm256_add_epi32(_sum31_23, _mm256_unpackhi_epi16(_sl30_21, _sh30_21));
#endif
tmpptr += 16;
kptr0 += 16;
}
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
_sum20_32 = _mm256_hadd_epi32(_sum20_32, _sum30_22);
__m256i _perm_mask = _mm256_set_epi32(5, 1, 6, 2, 7, 3, 4, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
_sum20_32 = _mm256_permutevar8x32_epi32(_sum20_32, _perm_mask);
#else
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02);
_tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02);
_tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum20_32, _sum30_22);
_tmp1 = _mm256_unpacklo_epi32(_sum21_33, _sum31_23);
_tmp2 = _mm256_unpackhi_epi32(_sum20_32, _sum30_22);
_tmp3 = _mm256_unpackhi_epi32(_sum21_33, _sum31_23);
_sum20_32 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum30_22 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum21_33 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum31_23 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02);
_sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13);
_sum20_32 = _mm256_add_epi32(_sum20_32, _sum30_22);
_sum21_33 = _mm256_add_epi32(_sum21_33, _sum31_23);
_sum20_32 = _mm256_add_epi32(_sum20_32, _sum21_33);
__m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
_sum20_32 = _mm256_permutevar8x32_epi32(_sum20_32, _perm_mask);
#endif
}
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
__m128i _sum20 = _mm256_extracti128_si256(_sum20_32, 0);
__m128i _sum30 = _mm256_extracti128_si256(_sum20_32, 1);
int j = 0;
for (; j < nn1; j++)
{
__m128i _val0123 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val0123 = _mm_cvtepi8_epi16(_val0123);
#else
__m128i _extval0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val0123);
_val0123 = _mm_unpacklo_epi8(_val0123, _extval0123);
#endif
__m128i _val01 = _mm_shufflelo_epi16(_val0123, _MM_SHUFFLE(1, 1, 0, 0));
_val01 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _val23 = _mm_shufflelo_epi16(_val0123, _MM_SHUFFLE(3, 3, 2, 2));
_val23 = _mm_shuffle_epi32(_val23, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
_w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl00 = _mm_mullo_epi16(_val01, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val01, _w0123);
__m128i _sl10 = _mm_mullo_epi16(_val23, _w0123);
__m128i _sh10 = _mm_mulhi_epi16(_val23, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
_sum20 = _mm_add_epi32(_sum20, _mm_unpacklo_epi16(_sl10, _sh10));
_sum30 = _mm_add_epi32(_sum30, _mm_unpackhi_epi16(_sl10, _sh10));
tmpptr += 4;
kptr0 += 4;
}
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum10);
_tmp1 = _mm_unpacklo_epi32(_sum20, _sum30);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum10);
_tmp3 = _mm_unpackhi_epi32(_sum20, _sum30);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum10 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum20 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum30 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_mm_storeu_si128((__m128i*)outptr0, _sum00);
_mm_storeu_si128((__m128i*)outptr1, _sum10);
_mm_storeu_si128((__m128i*)outptr2, _sum20);
_mm_storeu_si128((__m128i*)outptr3, _sum30);
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
#if __AVX2__
__m256i _sum00_12 = _mm256_setzero_si256();
#else
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
#endif
if (nn4 > 0)
{
#if __AVX2__
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _sum10_02 = _mm256_setzero_si256();
#else
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum01_13 = _mm256_setzero_si256();
__m256i _sum11_03 = _mm256_setzero_si256();
#endif
#else
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum02 = _mm_setzero_si128();
__m128i _sum03 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
__m128i _sum12 = _mm_setzero_si128();
__m128i _sum13 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn4; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
_val01_16 = _mm256_permute4x64_epi64(_val01_16, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
#else
__m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
__m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
__m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
__m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
_sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
_sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
#endif
#else
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val01 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
_val01 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
__m128i _val0 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _val1 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(3, 2, 3, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
__m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
__m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
__m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
__m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl00, _sh00));
_sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl01, _sh01));
_sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl01, _sh01));
_sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
_sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl10, _sh10));
_sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl11, _sh11));
_sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl11, _sh11));
#endif
tmpptr += 8;
kptr0 += 16;
}
#if __AVX2__
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
__m256i _perm_mask = _mm256_set_epi32(5, 1, 6, 2, 7, 3, 4, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
#else
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02);
_tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02);
_tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02);
_sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13);
__m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
#endif
#else
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
_tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
_tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
_tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
_tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
_tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
_sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum02 = _mm_add_epi32(_sum02, _sum03);
_sum10 = _mm_add_epi32(_sum10, _sum11);
_sum12 = _mm_add_epi32(_sum12, _sum13);
_sum00 = _mm_add_epi32(_sum00, _sum02);
_sum10 = _mm_add_epi32(_sum10, _sum12);
#endif
}
#if __AVX2__
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
#endif
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]);
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99754
// gcc incorrectly put 32bit to tail with _mm_loadu_si32 :(
// 0 1 2 3 x x x x x x x x x x x x
// x x x x x x x x x x x x 0 1 2 3
// __m128i _w0123 = _mm_loadu_si32(kptr0);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
_w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
tmpptr += 2;
kptr0 += 4;
}
int sum[8];
_mm_storeu_si128((__m128i*)sum, _sum00);
_mm_storeu_si128((__m128i*)(sum + 4), _sum10);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
__m128i _sum0 = _mm_setzero_si128();
if (nn4 > 0)
{
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
_val0 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl01, _sh01));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl01, _sh01));
tmpptr += 4;
kptr0 += 16;
}
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
}
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set1_epi16(tmpptr[0]);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
tmpptr += 1;
kptr0 += 4;
}
int sum[4];
_mm_storeu_si128((__m128i*)sum, _sum0);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
remain_outch_start += nn_outch << 2;
#endif // __SSE2__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __SSE2__
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
if (nn4 > 0)
{
__m256i _sum0_2 = _mm256_setzero_si256();
__m256i _sum1_3 = _mm256_setzero_si256();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
__m128i _w = _mm_cvtepi8_epi16(_w0123);
__m256i _ww = _mm256_inserti128_si256(_mm256_castsi128_si256(_w), _w, 1);
__m256i _sl0_1 = _mm256_mullo_epi16(_val01_16, _ww);
__m256i _sh0_1 = _mm256_mulhi_epi16(_val01_16, _ww);
_sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl0_1, _sh0_1));
_sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl0_1, _sh0_1));
tmpptr += 16;
kptr0 += 4;
}
__m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum1_3, 0);
__m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1);
__m128i _sum3 = _mm256_extracti128_si256(_sum1_3, 1);
sum0 = _mm_reduce_add_epi32(_sum0);
sum1 = _mm_reduce_add_epi32(_sum1);
sum2 = _mm_reduce_add_epi32(_sum2);
sum3 = _mm_reduce_add_epi32(_sum3);
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char w = kptr0[0];
sum0 += val0 * w;
sum1 += val1 * w;
sum2 += val2 * w;
sum3 += val3 * w;
tmpptr += 4;
kptr0 += 1;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum0 = 0;
int sum1 = 0;
if (nn4 > 0)
{
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
__m128i _extval = _mm_cmpgt_epi8(_mm_setzero_si128(), _val);
__m128i _val0 = _mm_unpacklo_epi8(_val, _extval);
__m128i _val1 = _mm_unpacklo_epi8(_val, _extval);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
__m128i _w = _mm_unpacklo_epi8(_w0123, _extw);
#endif
__m128i _sl0 = _mm_mullo_epi16(_val0, _w);
__m128i _sh0 = _mm_mulhi_epi16(_val0, _w);
__m128i _sl1 = _mm_mullo_epi16(_val1, _w);
__m128i _sh1 = _mm_mulhi_epi16(_val1, _w);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpacklo_epi16(_sl1, _sh1));
tmpptr += 8;
kptr0 += 4;
}
sum0 = _mm_reduce_add_epi32(_sum0);
sum1 = _mm_reduce_add_epi32(_sum1);
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char w = kptr0[0];
sum0 += val0 * w;
sum1 += val1 * w;
tmpptr += 2;
kptr0 += 1;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum = 0;
if (nn4 > 0)
{
__m128i _sum = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val0123 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val = _mm_cvtepi8_epi16(_val0123);
#else
__m128i _extval = _mm_cmpgt_epi8(_mm_setzero_si128(), _val0123);
__m128i _val = _mm_unpacklo_epi8(_val0123, _extval);
#endif
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
__m128i _w = _mm_unpacklo_epi8(_w0123, _extw);
#endif
__m128i _sl = _mm_mullo_epi16(_val, _w);
__m128i _sh = _mm_mulhi_epi16(_val, _w);
_sum = _mm_add_epi32(_sum, _mm_unpacklo_epi16(_sl, _sh));
tmpptr += 4;
kptr0 += 4;
}
sum = _mm_reduce_add_epi32(_sum);
}
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#else // __SSE2__
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i);
const signed char* kptr0 = kernel.channel(p);
int nn1 = inch * maxk;
int sum = 0;
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#endif // __SSE2__
}
}
// Repack convolution weights for the int8 im2col + sgemm kernels.
// Source layout is maxk-inch-outch (one maxk-long weight plane per
// (inch, outch) pair).  On SSE2 the destination interleaves groups of
// 4 input channels x 4 output channels (4a-4b-maxk-inch/4a-outch/4b) so
// the GEMM inner loop can load 16 int8 weights contiguously per
// (k, inch/4) step.  Leftover output channels (outch % 4) are packed
// 4 input channels at a time, leftover input channels one at a time.
// NOTE: the packing order here must match the kptr0 consumption order in
// im2col_sgemm_int8_sse exactly — do not reorder these loops.
static void convolution_im2col_sgemm_transform_kernel_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
#if __SSE2__
// interleave
// src = maxk-inch-outch
// dst = 4a-4b-maxk-inch/4a-outch/4b
Mat kernel = _kernel.reshape(maxk, inch, outch);
// allocate the packed kernel; row/channel counts account for the
// remainder channels (inch % 4 / outch % 4) packed at finer granularity
if (outch >= 4)
{
if (inch >= 4)
kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);
else
kernel_tm.create(4 * maxk, inch, outch / 4 + outch % 4, (size_t)1u);
}
else
{
if (inch >= 4)
kernel_tm.create(4 * maxk, inch / 4 + inch % 4, outch, (size_t)1u);
else
kernel_tm.create(1 * maxk, inch, outch, (size_t)1u);
}
int q = 0;
// main case: output channels in groups of 4
for (; q + 3 < outch; q += 4)
{
signed char* g00 = kernel_tm.channel(q / 4);
int p = 0;
// 4x4 (outch x inch) tiles: for each kernel tap k, emit a 16-weight
// tile ordered outch-major, inch-minor
for (; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
// remainder input channels: 4 output channels x 1 input channel per tap
for (; p < inch; p++)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p);
g00[0] = k00[k];
g00++;
}
}
}
}
// TODO unroll 2
// remainder output channels, one at a time (channel index offset past
// the q/4 grouped channels)
for (; q < outch; q++)
{
signed char* g00 = kernel_tm.channel(q / 4 + q % 4);
int p = 0;
// 1 output channel x 4 input channels per tap
for (; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int j = 0; j < 4; j++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
// fully scalar tail
for (; p < inch; p++)
{
for (int k = 0; k < maxk; k++)
{
const signed char* k00 = kernel.channel(q).row<const signed char>(p);
g00[0] = k00[k];
g00++;
}
}
}
#else // __SSE2__
// no SIMD path: the GEMM consumes the natural maxk-inch-outch layout
kernel_tm = _kernel.reshape(maxk, inch, outch);
#endif // __SSE2__
}
// int8 convolution as im2col + sgemm: unfold bottom_blob into
// bottom_im2col (size x maxk x inch — one row of output-sized samples per
// kernel tap per input channel), then run the packed int8 GEMM against the
// pre-transformed kernel.  top_blob must already be allocated to the
// output geometry; stride/dilation are applied during the unfold.
static void convolution_im2col_sgemm_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
{
// gap advances sptr from the end of one output row's samples to the
// start of the next input row
const int gap = w * stride_h - outw * stride_w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
signed char* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
// top-left input sample for kernel tap (u, v)
const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v;
for (int i = 0; i < outh; i++)
{
// copy outw samples at stride_w, unrolled by 4 then 2
int j = 0;
for (; j + 3 < outw; j += 4)
{
ptr[0] = sptr[0];
ptr[1] = sptr[stride_w];
ptr[2] = sptr[stride_w * 2];
ptr[3] = sptr[stride_w * 3];
sptr += stride_w * 4;
ptr += 4;
}
for (; j + 1 < outw; j += 2)
{
ptr[0] = sptr[0];
ptr[1] = sptr[stride_w];
sptr += stride_w * 2;
ptr += 2;
}
for (; j < outw; j++)
{
ptr[0] = sptr[0];
sptr += stride_w;
ptr += 1;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
|
interpolation_p2.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// Piecewise-quadratic (p2) prolongation of one block: interpolate coarse
// values from read[] (level_c, vector id_c) onto the fine grid write[]
// (level_f, vector id_f), accumulating as
//   write = prescale_f*write + interpolant.
// Each coarse cell produces a 2x2x2 group of fine cells; the 1D quadratic
// weights are (5/32, 30/32, -3/32), applied tensor-product style in i,
// then j, then k.  Two implementations: a naive triple loop (fused 3D
// 27-point stencil) and a vectorizable version using restrict pencils.
static inline void interpolation_p2_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
// interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
int write_dim_i = block->dim.i<<1; // calculate the dimensions of the resultant fine block
int write_dim_j = block->dim.j<<1;
int write_dim_k = block->dim.k<<1;
int read_i = block->read.i;
int read_j = block->read.j;
int read_k = block->read.k;
int read_jStride = block->read.jStride;
int read_kStride = block->read.kStride;
int write_i = block->write.i;
int write_j = block->write.j;
int write_k = block->write.k;
int write_jStride = block->write.jStride;
int write_kStride = block->write.kStride;
const double * __restrict__ read = block->read.ptr;
double * __restrict__ write = block->write.ptr;
// when the block references a box (box >= 0), use the box's strides and a
// base pointer shifted past the ghost zone instead of the raw block pointer
if(block->read.box >=0){
read_jStride = level_c->my_boxes[block->read.box ].jStride;
read_kStride = level_c->my_boxes[block->read.box ].kStride;
read = level_c->my_boxes[ block->read.box].vectors[id_c] + level_c->box_ghosts*(1+ read_jStride+ read_kStride);
}
if(block->write.box>=0){
write_jStride = level_f->my_boxes[block->write.box].jStride;
write_kStride = level_f->my_boxes[block->write.box].kStride;
write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->box_ghosts*(1+write_jStride+write_kStride);
}
#ifdef USE_NAIVE_INTERP
// reference implementation: the full 27-point product stencil with the
// three 1D passes fused; coefficients are (1D weights -3,30,5)/32 taken
// tensor-product, hence the common factor 1/32^3 = 1/32768
int i,j,k;
double OneOver32Cubed = 1.0/32768.0;
for(k=0;k<write_dim_k;k++){int delta_k=-read_kStride;if(k&0x1)delta_k=read_kStride;
for(j=0;j<write_dim_j;j++){int delta_j=-read_jStride;if(j&0x1)delta_j=read_jStride;
for(i=0;i<write_dim_i;i++){int delta_i= -1;if(i&0x1)delta_i= 1; // i.e. even points look backwards while odd points look forward
int write_ijk = ((i )+write_i) + (((j )+write_j)*write_jStride) + (((k )+write_k)*write_kStride);
int read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
//
// | -3/32 | 30/32 | 5/32 |
// |---+---|---+---|---+---|
// | | | | x | | |
//
write[write_ijk] = prescale_f*write[write_ijk] +
OneOver32Cubed*(
-27.0*read[read_ijk-delta_i-delta_j-delta_k] +
270.0*read[read_ijk -delta_j-delta_k] +
45.0*read[read_ijk+delta_i-delta_j-delta_k] +
270.0*read[read_ijk-delta_i -delta_k] +
-2700.0*read[read_ijk -delta_k] +
-450.0*read[read_ijk+delta_i -delta_k] +
45.0*read[read_ijk-delta_i+delta_j-delta_k] +
-450.0*read[read_ijk +delta_j-delta_k] +
-75.0*read[read_ijk+delta_i+delta_j-delta_k] +
270.0*read[read_ijk-delta_i-delta_j ] +
-2700.0*read[read_ijk -delta_j ] +
-450.0*read[read_ijk+delta_i-delta_j ] +
-2700.0*read[read_ijk-delta_i ] +
27000.0*read[read_ijk ] +
4500.0*read[read_ijk+delta_i ] +
-450.0*read[read_ijk-delta_i+delta_j ] +
4500.0*read[read_ijk +delta_j ] +
750.0*read[read_ijk+delta_i+delta_j ] +
45.0*read[read_ijk-delta_i-delta_j+delta_k] +
-450.0*read[read_ijk -delta_j+delta_k] +
-75.0*read[read_ijk+delta_i-delta_j+delta_k] +
-450.0*read[read_ijk-delta_i +delta_k] +
4500.0*read[read_ijk +delta_k] +
750.0*read[read_ijk+delta_i +delta_k] +
-75.0*read[read_ijk-delta_i+delta_j+delta_k] +
750.0*read[read_ijk +delta_j+delta_k] +
125.0*read[read_ijk+delta_i+delta_j+delta_k]
);
}}}
#else
// vectorizable implementation: process a 2x2x2 group of fine points per
// coarse point (i,j,k step by 2; ii,jj,kk index the coarse grid)
int i,j,k;
int ii,jj,kk;
double w0 = 5.0/32.0;
double w1 = 30.0/32.0;
double w2 = -3.0/32.0;
for(k=0,kk=0;k<write_dim_k;k+=2,kk++){
for(j=0,jj=0;j<write_dim_j;j+=2,jj++){
// compiler cannot infer/speculate write[ijk+write_jStride] is disjoint from write[ijk], so create a unique restrict pointers for each nonliteral offset...
double * __restrict__ write00 = write + write_i + (write_j+j+0)*write_jStride + (write_k+k+0)*write_kStride;
double * __restrict__ write10 = write + write_i + (write_j+j+1)*write_jStride + (write_k+k+0)*write_kStride;
double * __restrict__ write01 = write + write_i + (write_j+j+0)*write_jStride + (write_k+k+1)*write_kStride;
double * __restrict__ write11 = write + write_i + (write_j+j+1)*write_jStride + (write_k+k+1)*write_kStride;
for(i=0,ii=0;i<write_dim_i;i+=2,ii++){
int write_ijk = ( i+write_i) + ( j+write_j)*write_jStride + ( k+write_k)*write_kStride;
int read_ijk = (ii+ read_i) + (jj+ read_j)* read_jStride + (kk+ read_k)* read_kStride;
//
// | 5/32 | 30/32 | -3/32 | coarse grid
// |---+---|---+---|---+---|
// | | | ? | | | | fine grid
//
// grab all coarse grid points...
const double c000=read[read_ijk-1-read_jStride-read_kStride], c100=read[read_ijk -read_jStride-read_kStride], c200=read[read_ijk+1-read_jStride-read_kStride];
const double c010=read[read_ijk-1 -read_kStride], c110=read[read_ijk -read_kStride], c210=read[read_ijk+1 -read_kStride];
const double c020=read[read_ijk-1+read_jStride-read_kStride], c120=read[read_ijk +read_jStride-read_kStride], c220=read[read_ijk+1+read_jStride-read_kStride];
const double c001=read[read_ijk-1-read_jStride ], c101=read[read_ijk -read_jStride ], c201=read[read_ijk+1-read_jStride ];
const double c011=read[read_ijk-1 ], c111=read[read_ijk ], c211=read[read_ijk+1 ];
const double c021=read[read_ijk-1+read_jStride ], c121=read[read_ijk +read_jStride ], c221=read[read_ijk+1+read_jStride ];
const double c002=read[read_ijk-1-read_jStride+read_kStride], c102=read[read_ijk -read_jStride+read_kStride], c202=read[read_ijk+1-read_jStride+read_kStride];
const double c012=read[read_ijk-1 +read_kStride], c112=read[read_ijk +read_kStride], c212=read[read_ijk+1 +read_kStride];
const double c022=read[read_ijk-1+read_jStride+read_kStride], c122=read[read_ijk +read_jStride+read_kStride], c222=read[read_ijk+1+read_jStride+read_kStride];
// interpolate in i to create fine i / coarse jk points...
// (f0* is the even fine point, f1* the odd one; they swap w0/w2)
//
// +-------+-------+-------+ :.......+---+---+.......:
// | | | | : | | | :
// | c | c | c | : | f | f | :
// | | | | : | | | :
// +-------+-------+-------+ :.......+---+---+.......:
// | | | | : | | | :
// | c | c | c | -> : | f | f | :
// | | | | : | | | :
// +-------+-------+-------+ :.......+---+---+.......:
// | | | | : | | | :
// | c | c | c | : | f | f | :
// | | | | : | | | :
// +-------+-------+-------+ :.......+---+---+.......:
//
const double f0c00 = ( w1*c100 + w0*c000 + w2*c200 );
const double f1c00 = ( w1*c100 + w2*c000 + w0*c200 );
const double f0c10 = ( w1*c110 + w0*c010 + w2*c210 );
const double f1c10 = ( w1*c110 + w2*c010 + w0*c210 );
const double f0c20 = ( w1*c120 + w0*c020 + w2*c220 );
const double f1c20 = ( w1*c120 + w2*c020 + w0*c220 );
const double f0c01 = ( w1*c101 + w0*c001 + w2*c201 );
const double f1c01 = ( w1*c101 + w2*c001 + w0*c201 );
const double f0c11 = ( w1*c111 + w0*c011 + w2*c211 );
const double f1c11 = ( w1*c111 + w2*c011 + w0*c211 );
const double f0c21 = ( w1*c121 + w0*c021 + w2*c221 );
const double f1c21 = ( w1*c121 + w2*c021 + w0*c221 );
const double f0c02 = ( w1*c102 + w0*c002 + w2*c202 );
const double f1c02 = ( w1*c102 + w2*c002 + w0*c202 );
const double f0c12 = ( w1*c112 + w0*c012 + w2*c212 );
const double f1c12 = ( w1*c112 + w2*c012 + w0*c212 );
const double f0c22 = ( w1*c122 + w0*c022 + w2*c222 );
const double f1c22 = ( w1*c122 + w2*c022 + w0*c222 );
// interpolate in j to create fine ij / coarse k points...
//
// :.......+---+---+.......: :.......:.......:.......:
// : | | | : : : : :
// : | | | : : : : :
// : | | | : : : : :
// :.......+---+---+.......: :.......+---+---+.......:
// : | | | : : | | | :
// : | | | : -> : +---+---+ :
// : | | | : : | | | :
// :.......+---+---+.......: :.......+---+---+.......:
// : | | | : : : : :
// : | | | : : : : :
// : | | | : : : : :
// :.......+---+---+.......: :.......:.......:.......:
//
const double f00c0 = ( w1*f0c10 + w0*f0c00 + w2*f0c20 );
const double f10c0 = ( w1*f1c10 + w0*f1c00 + w2*f1c20 );
const double f01c0 = ( w1*f0c10 + w2*f0c00 + w0*f0c20 );
const double f11c0 = ( w1*f1c10 + w2*f1c00 + w0*f1c20 );
const double f00c1 = ( w1*f0c11 + w0*f0c01 + w2*f0c21 );
const double f10c1 = ( w1*f1c11 + w0*f1c01 + w2*f1c21 );
const double f01c1 = ( w1*f0c11 + w2*f0c01 + w0*f0c21 );
const double f11c1 = ( w1*f1c11 + w2*f1c01 + w0*f1c21 );
const double f00c2 = ( w1*f0c12 + w0*f0c02 + w2*f0c22 );
const double f10c2 = ( w1*f1c12 + w0*f1c02 + w2*f1c22 );
const double f01c2 = ( w1*f0c12 + w2*f0c02 + w0*f0c22 );
const double f11c2 = ( w1*f1c12 + w2*f1c02 + w0*f1c22 );
// interpolate in k to create fine ijk points...
const double f000 = ( w1*f00c1 + w0*f00c0 + w2*f00c2 );
const double f100 = ( w1*f10c1 + w0*f10c0 + w2*f10c2 );
const double f010 = ( w1*f01c1 + w0*f01c0 + w2*f01c2 );
const double f110 = ( w1*f11c1 + w0*f11c0 + w2*f11c2 );
const double f001 = ( w1*f00c1 + w2*f00c0 + w0*f00c2 );
const double f101 = ( w1*f10c1 + w2*f10c0 + w0*f10c2 );
const double f011 = ( w1*f01c1 + w2*f01c0 + w0*f01c2 );
const double f111 = ( w1*f11c1 + w2*f11c0 + w0*f11c2 );
// commit to memory...
#if 0 // compiler cannot infer/speculate write[ijk+write_jStride] is disjoint from write[ijk], and thus cannot vectorize...
write[write_ijk ] = prescale_f*write[write_ijk ] + f000;
write[write_ijk+1 ] = prescale_f*write[write_ijk+1 ] + f100;
write[write_ijk +write_jStride ] = prescale_f*write[write_ijk +write_jStride ] + f010;
write[write_ijk+1+write_jStride ] = prescale_f*write[write_ijk+1+write_jStride ] + f110;
write[write_ijk +write_kStride] = prescale_f*write[write_ijk +write_kStride] + f001;
write[write_ijk+1 +write_kStride] = prescale_f*write[write_ijk+1 +write_kStride] + f101;
write[write_ijk +write_jStride+write_kStride] = prescale_f*write[write_ijk +write_jStride+write_kStride] + f011;
write[write_ijk+1+write_jStride+write_kStride] = prescale_f*write[write_ijk+1+write_jStride+write_kStride] + f111;
#else // use a unique restrict pointer for each pencil...
write00[i ] = prescale_f*write00[i ] + f000;
write00[i+1] = prescale_f*write00[i+1] + f100;
write10[i ] = prescale_f*write10[i ] + f010;
write10[i+1] = prescale_f*write10[i+1] + f110;
write01[i ] = prescale_f*write01[i ] + f001;
write01[i+1] = prescale_f*write01[i+1] + f101;
write11[i ] = prescale_f*write11[i ] + f011;
write11[i+1] = prescale_f*write11[i+1] + f111;
#endif
}}}
#endif
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise quadratic interpolation
// Inter-level piecewise-quadratic interpolation driver: prolongate vector
// id_c on the coarse level_c into vector id_f on the fine level_f, with
// existing fine data scaled by prescale_f.  Sequence: exchange coarse ghost
// zones and apply BCs, prepost MPI Irecvs, pack and Isend coarse-side
// buffers, perform local interpolation (overlapping the sends), then wait
// on all messages and unpack.  Per-phase timers accumulate on level_f.
void interpolation_p2(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
exchange_boundary(level_c,id_c,STENCIL_SHAPE_BOX);
apply_BCs_p2(level_c,id_c,STENCIL_SHAPE_BOX);
double _timeCommunicationStart = getTime();
double _timeStart,_timeEnd;
int buffer=0;
int n;
// tag encodes the fine level plus a constant discriminator for this op
int my_tag = (level_f->tag<<4) | 0x7;
#ifdef USE_MPI
// by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
MPI_Request *recv_requests = level_f->interpolation.requests;
MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;
// loop through packed list of MPI receives and prepost Irecv's...
if(level_f->interpolation.num_recvs>0){
_timeStart = getTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_f->interpolation.num_recvs;n++){
MPI_Irecv(level_f->interpolation.recv_buffers[n],
level_f->interpolation.recv_sizes[n],
MPI_DOUBLE,
level_f->interpolation.recv_ranks[n],
my_tag,
MPI_COMM_WORLD,
&recv_requests[n]
);
}
_timeEnd = getTime();
level_f->timers.interpolation_recv += (_timeEnd-_timeStart);
}
// pack MPI send buffers (block list [0] = blocks destined for other ranks)...
if(level_c->interpolation.num_blocks[0]>0){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
// !!! prescale==0 because you don't want to increment the MPI buffer
interpolation_p2_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
}
_timeEnd = getTime();
level_f->timers.interpolation_pack += (_timeEnd-_timeStart);
}
// loop through MPI send buffers and post Isend's...
if(level_c->interpolation.num_sends>0){
_timeStart = getTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_c->interpolation.num_sends;n++){
MPI_Isend(level_c->interpolation.send_buffers[n],
level_c->interpolation.send_sizes[n],
MPI_DOUBLE,
level_c->interpolation.send_ranks[n],
my_tag,
MPI_COMM_WORLD,
&send_requests[n]
);
}
_timeEnd = getTime();
level_f->timers.interpolation_send += (_timeEnd-_timeStart);
}
#endif
// perform local interpolation (block list [1])... try and hide within Isend latency...
if(level_c->interpolation.num_blocks[1]>0){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
interpolation_p2_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
}
_timeEnd = getTime();
level_f->timers.interpolation_local += (_timeEnd-_timeStart);
}
// wait for MPI to finish...
#ifdef USE_MPI
if(nMessages>0){
_timeStart = getTime();
MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
_timeEnd = getTime();
level_f->timers.interpolation_wait += (_timeEnd-_timeStart);
}
// unpack MPI receive buffers (block list [2]); prescale applied here on the fine side
if(level_f->interpolation.num_blocks[2]>0){
_timeStart = getTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
}
_timeEnd = getTime();
level_f->timers.interpolation_unpack += (_timeEnd-_timeStart);
}
#endif
level_f->timers.interpolation_total += (double)(getTime()-_timeCommunicationStart);
}
|
GB_binop__rminus_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rminus_fp32
// A.*B function (eWiseMult): GB_AemultB__rminus_fp32
// A*D function (colscale): GB_AxD__rminus_fp32
// D*A function (rowscale): GB_DxB__rminus_fp32
// C+=B function (dense accum): GB_Cdense_accumB__rminus_fp32
// C+=b function (dense accum): GB_Cdense_accumb__rminus_fp32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_fp32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_fp32
// C=scalar+B GB_bind1st__rminus_fp32
// C=scalar+B' GB_bind1st_tran__rminus_fp32
// C=A+scalar GB_bind2nd__rminus_fp32
// C=A'+scalar GB_bind2nd_tran__rminus_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (y - x) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_FP32 || GxB_NO_RMINUS_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the rminus operator is
// injected into the shared template via the GB_* macros above.
// note: unlike the GrB_Info variants below, this one has no GB_DISABLE
// guard and returns void.
void GB_Cdense_ewise3_accum__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation).
// Returns GrB_NO_VALUE when this operator/type pairing is compiled out
// (GB_DISABLE), so the caller can fall back to the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// numeric work lives in the shared template, specialized via GB_* macros
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// rminus operator.  The slice arrays partition B's entries across ntasks
// tasks for the parallel template.  Returns GrB_NO_VALUE if disabled.
GrB_Info GB_Cdense_accumB__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into the
// dense matrix C with the rminus operator.  Returns GrB_NO_VALUE when this
// operator/type pairing is compiled out (GB_DISABLE).
// Fix: the original (auto-generated) body returned GrB_SUCCESS both inside
// the braced scope and again after it, leaving the second return as
// unreachable dead code; keep only the trailing return, matching the
// sibling GB_Cdense_accumB above.
GrB_Info GB_Cdense_accumb__rminus_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, combining
// entries with the rminus operator (GB_BINOP: z = y - x).  The *_is_pattern
// flags mean only the structure (not values) of that matrix is used.
// Returns GrB_NO_VALUE if this kernel is disabled.
GrB_Info GB_AxD__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, combining entries
// with the rminus operator.  Returns GrB_NO_VALUE if this kernel is
// disabled at compile time.
GrB_Info GB_DxB__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Element-wise "add" over the union of the patterns of A and B, using the
// rminus fp32 operator where both entries are present.  The task list and
// C_to_* mappings were computed by a prior symbolic phase.
GrB_Info GB_AaddB__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Element-wise "multiply" over the intersection of the patterns of A and B,
// applying the rminus fp32 operator to each pair of entries.
GrB_Info GB_AemultB__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// rminus with the scalar bound as the first argument:
// Cx [p] = rminus (x, Bx [p]) = Bx [p] - x, for all p.
GrB_Info GB_bind1st__rminus_fp32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
const float *Bx = (const float *) Bx_input ;
const float x = (*((const float *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// z = rminus (x, bkj) = bkj - x
Cx [k] = Bx [k] - x ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// rminus with the scalar bound as the second argument:
// Cx [p] = rminus (Ax [p], y) = y - Ax [p], for all p.
GrB_Info GB_bind2nd__rminus_fp32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
const float *Ax = (const float *) Ax_input ;
const float y = (*((const float *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// z = rminus (aij, y) = y - aij
Cx [k] = y - Ax [k] ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// For rminus this is cij = rminus (x, aij) = aij - x, applied by the
// transpose template below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB_bind1st_tran__rminus_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// preprocessor directives after the return are still processed: this
// restores GB_ATYPE for any code that follows in the generated file
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// For rminus this is cij = rminus (aij, y) = y - aij, applied by the
// transpose template below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB_bind2nd_tran__rminus_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint16_int16
// op(A') function: GB_unop_tran__identity_uint16_int16
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// The operator is identity, so the only work per entry is the
// int16_t -> uint16_t typecast.
GrB_Info GB_unop_apply__identity_uint16_int16
(
uint16_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// cast each entry; identity op adds nothing further
Cx [k] = (uint16_t) Ax [k] ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while casting each int16 entry to uint16 (identity op).  The
// transpose loop is in the included template, using the GB_* macros defined
// at the top of this generated file.
GrB_Info GB_unop_tran__identity_uint16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_AxB_dot2.c | //------------------------------------------------------------------------------
// GB_AxB_dot2: compute C=A'*B or C<!M>=A'*B in parallel, in place
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// GB_AxB_dot2 does its computation in two phases. The first phase counts the
// number of entries in each column of C. The second phase can then construct
// the result C in place, and thus this method can be done in parallel, for the
// single matrix computation C=A'*B.
// Two variants are handled: C=A'*B and C<!M>=A'*B.
// The C<M>=A'*B computation is computed by GB_AxB_dot3.
#include "GB_mxm.h"
#include "GB_iterator.h"
#ifndef GBCOMPACT
#include "GB_AxB__include.h"
#endif
// free all dot2 workspace: the B slice boundaries and the per-task C_counts
// arrays (safe to invoke with partially-allocated workspace)
#define GB_FREE_WORK \
{ \
GB_FREE (B_slice) ; \
if (C_counts != NULL) \
{ \
for (int taskid = 0 ; taskid < naslice ; taskid++) \
{ \
GB_FREE (C_counts [taskid]) ; \
} \
} \
GB_FREE (C_counts) ; \
}
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_AxB_dot2 // C=A'*B or C<!M>=A'*B, dot product method
(
GrB_Matrix *Chandle, // output matrix
const GrB_Matrix M, // mask matrix for C<!M>=A'*B
// if present, the mask is complemented
const bool Mask_struct, // if true, use only the structure of M
const GrB_Matrix *Aslice, // input matrices (already sliced)
const GrB_Matrix B, // input matrix
const GrB_Semiring semiring, // semiring that defines C=A*B
const bool flipxy, // if true, do z=fmult(b,a) vs fmult(a,b)
bool *mask_applied, // if true, mask was applied
int nthreads,
int naslice,
int nbslice,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (Aslice != NULL) ;
GrB_Matrix A = Aslice [0] ; // just for type and dimensions
ASSERT (Chandle != NULL) ;
ASSERT (*Chandle == NULL) ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for dot A'*B", GB0) ;
ASSERT_MATRIX_OK (A, "A for dot A'*B", GB0) ;
for (int taskid = 0 ; taskid < naslice ; taskid++)
{
// every slice of A must agree with A in shape and type
ASSERT_MATRIX_OK (Aslice [taskid], "A slice for dot2 A'*B", GB0) ;
ASSERT (!GB_PENDING (Aslice [taskid])) ;
ASSERT (!GB_ZOMBIES (Aslice [taskid])) ;
ASSERT ((Aslice [taskid])->vlen == B->vlen) ;
ASSERT (A->vlen == (Aslice [taskid])->vlen) ;
ASSERT (A->vdim == (Aslice [taskid])->vdim) ;
ASSERT (A->type == (Aslice [taskid])->type) ;
}
ASSERT_MATRIX_OK (B, "B for dot A'*B", GB0) ;
ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
ASSERT_SEMIRING_OK (semiring, "semiring for numeric A'*B", GB0) ;
ASSERT (A->vlen == B->vlen) ;
ASSERT (mask_applied != NULL) ;
int64_t *GB_RESTRICT B_slice = NULL ;
int64_t **C_counts = NULL ;
int64_t cnvec = B->nvec ;
//--------------------------------------------------------------------------
// get the semiring operators
//--------------------------------------------------------------------------
GrB_BinaryOp mult = semiring->multiply ;
GrB_Monoid add = semiring->add ;
ASSERT (mult->ztype == add->op->ztype) ;
bool A_is_pattern, B_is_pattern ;
GB_AxB_pattern (&A_is_pattern, &B_is_pattern, flipxy, mult->opcode) ;
(*Chandle) = NULL ;
//--------------------------------------------------------------------------
// allocate workspace and slice B
//--------------------------------------------------------------------------
if (!GB_pslice (&B_slice, /* B */ B->p, B->nvec, nbslice))
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// compute # of entries in each vector of C
//--------------------------------------------------------------------------
GrB_Type ctype = add->op->ztype ;
int64_t cvlen = A->vdim ;
int64_t cvdim = B->vdim ;
if (B->nvec_nonempty < 0)
{
B->nvec_nonempty = GB_nvec_nonempty (B, NULL) ;
}
// C_counts [taskid][k] will hold the number of entries task taskid
// contributes to vector k of C
C_counts = GB_CALLOC (naslice, int64_t *) ;
if (C_counts == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++)
{
int64_t *GB_RESTRICT C_count = GB_CALLOC (B->nvec, int64_t) ;
if (C_count == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GrB_OUT_OF_MEMORY) ;
}
C_counts [a_taskid] = C_count ;
}
for (int a_taskid = 0 ; a_taskid < naslice ; a_taskid++)
{
if ((Aslice [a_taskid])->nvec_nonempty < 0)
{
(Aslice [a_taskid])->nvec_nonempty =
GB_nvec_nonempty (Aslice [a_taskid], NULL) ;
}
}
// phase1 parallel region: each thread computes C_counts [taskid]
// for its slice.
#define GB_PHASE_1_OF_2
#include "GB_AxB_dot2_meta.c"
#undef GB_PHASE_1_OF_2
// allocate C; its hypersparsity matches B
info = GB_new (Chandle, ctype, cvlen, cvdim, GB_Ap_malloc, true,
GB_SAME_HYPER_AS (B->is_hyper), B->hyper_ratio, cnvec, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_WORK ;
return (info) ;
}
GrB_Matrix C = (*Chandle) ;
int64_t *GB_RESTRICT Cp = C->p ;
// cumulative sum of counts in each column
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < cnvec ; k++)
{
// stack the per-task counts for vector k: after this loop each
// C_count [k] is that task's starting offset within the vector,
// and Cp [k] is the vector's total entry count
int64_t s = 0 ;
for (int taskid = 0 ; taskid < naslice ; taskid++)
{
int64_t *GB_RESTRICT C_count = C_counts [taskid] ;
int64_t c = C_count [k] ;
C_count [k] = s ;
s += c ;
}
Cp [k] = s ;
}
Cp [cnvec] = 0 ;
C->nvec = cnvec ;
// Cp = cumulative sum of Cp
GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads) ;
int64_t cnz = Cp [cnvec] ;
// C->h = B->h
if (B->is_hyper)
{
GB_memcpy (C->h, B->h, cnvec * sizeof (int64_t), nthreads) ;
}
// free C_count for the first thread; it is no longer needed
GB_FREE (C_counts [0]) ;
C->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// allocate C->x and C->i
//--------------------------------------------------------------------------
info = GB_ix_alloc (C, cnz, true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_MATRIX_FREE (Chandle) ;
GB_FREE_WORK ;
return (info) ;
}
//--------------------------------------------------------------------------
// C = A'*B, computing each entry with a dot product, via builtin semiring
//--------------------------------------------------------------------------
bool done = false ;
#ifndef GBCOMPACT
//----------------------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------------------
#define GB_Adot2B(add,mult,xname) GB_Adot2B_ ## add ## mult ## xname
#define GB_AxB_WORKER(add,mult,xname) \
{ \
info = GB_Adot2B (add,mult,xname) (C, M, Mask_struct, \
Aslice, A_is_pattern, B, B_is_pattern, B_slice, \
C_counts, nthreads, naslice, nbslice) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//----------------------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------------------
GB_Opcode mult_opcode, add_opcode ;
GB_Type_code xcode, ycode, zcode ;
if (GB_AxB_semiring_builtin (A, A_is_pattern, B, B_is_pattern, semiring,
flipxy, &mult_opcode, &add_opcode, &xcode, &ycode, &zcode))
{
#include "GB_AxB_factory.c"
}
ASSERT (info == GrB_SUCCESS || info == GrB_NO_VALUE) ;
#endif
//--------------------------------------------------------------------------
// C = A'*B, computing each entry with a dot product, with typecasting
//--------------------------------------------------------------------------
// fallback path: no hard-coded kernel was available (or all were
// disabled), so use function pointers and explicit casts
if (!done)
{
GB_BURBLE_MATRIX (C, "generic ") ;
//----------------------------------------------------------------------
// get operators, functions, workspace, contents of A, B, C, and M
//----------------------------------------------------------------------
GxB_binary_function fmult = mult->function ;
GxB_binary_function fadd = add->op->function ;
size_t csize = C->type->size ;
size_t asize = A_is_pattern ? 0 : A->type->size ;
size_t bsize = B_is_pattern ? 0 : B->type->size ;
size_t xsize = mult->xtype->size ;
size_t ysize = mult->ytype->size ;
// scalar workspace: because of typecasting, the x/y types need not
// be the same as the size of the A and B types.
// flipxy false: aki = (xtype) A(k,i) and bkj = (ytype) B(k,j)
// flipxy true: aki = (ytype) A(k,i) and bkj = (xtype) B(k,j)
size_t aki_size = flipxy ? ysize : xsize ;
size_t bkj_size = flipxy ? xsize : ysize ;
GB_void *GB_RESTRICT terminal = (GB_void *) add->terminal ;
GB_cast_function cast_A, cast_B ;
if (flipxy)
{
// A is typecasted to y, and B is typecasted to x
cast_A = A_is_pattern ? NULL :
GB_cast_factory (mult->ytype->code, A->type->code) ;
cast_B = B_is_pattern ? NULL :
GB_cast_factory (mult->xtype->code, B->type->code) ;
}
else
{
// A is typecasted to x, and B is typecasted to y
cast_A = A_is_pattern ? NULL :
GB_cast_factory (mult->xtype->code, A->type->code) ;
cast_B = B_is_pattern ? NULL :
GB_cast_factory (mult->ytype->code, B->type->code) ;
}
//----------------------------------------------------------------------
// C = A'*B via dot products, function pointers, and typecasting
//----------------------------------------------------------------------
// aki = A(k,i), located in Ax [pA]
#define GB_GETA(aki,Ax,pA) \
GB_void aki [GB_VLA(aki_size)] ; \
if (!A_is_pattern) cast_A (aki, Ax +((pA)*asize), asize)
// bkj = B(k,j), located in Bx [pB]
#define GB_GETB(bkj,Bx,pB) \
GB_void bkj [GB_VLA(bkj_size)] ; \
if (!B_is_pattern) cast_B (bkj, Bx +((pB)*bsize), bsize)
// break if cij reaches the terminal value
#define GB_DOT_TERMINAL(cij) \
if (terminal != NULL && memcmp (cij, terminal, csize) == 0) \
{ \
break ; \
}
// C(i,j) = A(i,k) * B(k,j)
#define GB_MULT(cij, aki, bkj) \
GB_FMULT (cij, aki, bkj)
// C(i,j) += A(i,k) * B(k,j)
#define GB_MULTADD(cij, aki, bkj) \
GB_void zwork [GB_VLA(csize)] ; \
GB_MULT (zwork, aki, bkj) ; \
fadd (cij, cij, zwork)
// define cij for each task
#define GB_CIJ_DECLARE(cij) \
GB_void cij [GB_VLA(csize)]
// address of Cx [p]
#define GB_CX(p) Cx +((p)*csize)
// save the value of C(i,j)
#define GB_CIJ_SAVE(cij,p) \
memcpy (GB_CX (p), cij, csize)
#define GB_ATYPE GB_void
#define GB_BTYPE GB_void
#define GB_CTYPE GB_void
#define GB_PHASE_2_OF_2
// no vectorization
#define GB_PRAGMA_SIMD_VECTORIZE ;
#define GB_PRAGMA_SIMD_DOT(cij) ;
if (flipxy)
{
#define GB_FMULT(z,x,y) fmult (z,y,x)
#include "GB_AxB_dot2_meta.c"
#undef GB_FMULT
}
else
{
#define GB_FMULT(z,x,y) fmult (z,x,y)
#include "GB_AxB_dot2_meta.c"
#undef GB_FMULT
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
ASSERT_MATRIX_OK (C, "dot: C = A'*B output", GB0) ;
ASSERT (*Chandle == C) ;
(*mask_applied) = (M != NULL) ;
return (GrB_SUCCESS) ;
}
|
GB_unop__exp_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp_fc32_fc32)
// op(A') function: GB (_unop_tran__exp_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = cexpf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cexpf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = cexpf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXP || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Single-precision complex exponential applied to every (live) entry.
// When Ab is non-NULL, A is in bitmap form and only entries with Ab [p]
// nonzero are present; the caller has already copied A->b into C->b.
GrB_Info GB (_unop_apply__exp_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip bitmap holes; a NULL Ab means every position is present
if (Ab != NULL && !Ab [k]) continue ;
Cx [k] = cexpf (Ax [k]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while applying cexpf to each entry; the transpose loop is in
// the included template, driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__exp_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_fp64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp64_int64
// op(A') function: GB_unop_tran__identity_fp64_int64
// C type: double
// A type: int64_t
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// The operator is identity, so the only work per entry is the
// int64_t -> double typecast.
GrB_Info GB_unop_apply__identity_fp64_int64
(
double *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// cast each entry; identity op adds nothing further
Cx [k] = (double) Ax [k] ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while casting each int64 entry to double (identity op); the
// transpose loop is in the included template.
GrB_Info GB_unop_tran__identity_fp64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rose_accumulateForce.c | #include "omp.h"
// For each of the len output slots, sum the entries of tmp selected by the
// index sublist idxList[idxBound[ii] .. idxBound[ii+1]-1] and add that sum
// into force[ii].  Rows are independent, so the outer loop parallelizes.
//
// Fixes vs. the generated original:
// * "#pragma omp parallel for private(jj)" named jj before any declaration
//   of it was visible -- a compile error under -fopenmp (a private-clause
//   variable must be declared in an enclosing scope).  Declaring the loop
//   indices inside the loops makes them private automatically.
// * the inner "parallel for reduction(+:sum)" spawned a nested team per
//   outer iteration; a plain sequential inner loop gives the same result
//   without oversubscription.
// * removed the unused local idx and the deprecated register keyword.
void AccumulateForce(int *idxBound,int *idxList,int len,double *tmp,double *force)
{
#pragma omp parallel for
  for (int ii = 0; ii < len; ii++) {
    int count = idxBound[ii + 1] - idxBound[ii];
    int *list = &idxList[idxBound[ii]];
    double sum = 0.0;
    for (int jj = 0; jj < count; jj++) {
      sum += tmp[list[jj]];
    }
    force[ii] += sum;
  }
}
|
matmul.c | #include <stdlib.h>
#include <sys/time.h>
#include <stdio.h>
//#define _OPENACCM
#ifdef _OPENACCM
#include <openacc.h>
#endif
#if OMP == 1
#include <omp.h>
#endif
#ifdef __cplusplus
#define restrict __restrict__
#endif
#ifndef _N_
#define _N_ 512
#endif
#ifndef HOST_MEM_ALIGNMENT
#define HOST_MEM_ALIGNMENT 0
#endif
#if HOST_MEM_ALIGNMENT == 1
#define AOCL_ALIGNMENT 64
#endif
int N = _N_;
int M = N;
int P = N;
// Wall-clock time in seconds since the Unix epoch, with microsecond
// resolution (gettimeofday).  Used below to time the two multiply kernels.
double my_timer ()
{
  struct timeval tv;
  gettimeofday(&tv, 0);
  return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
}
// Compute a = b * c, where a is M x N, b is M x P, and c is P x N, all in
// flat row-major arrays (M, N, P are file-level globals).  The OpenACC
// pragmas map the nest onto gang/worker parallelism; without an OpenACC
// compiler they are ignored and this runs as a plain triple loop.
void
MatrixMultiplication_openacc(float * restrict a,float * restrict b, float * restrict c)
{
  int row, col, kk ;
#ifdef _OPENACCM
  acc_init(acc_device_default);
#endif
#pragma acc data copyout(a[0:(M*N)]), copyin(b[0:(M*P)],c[0:(P*N)])
  {
#pragma acc kernels loop independent gang
    for (row = 0; row < M; row++) {
#pragma acc loop worker
      for (col = 0; col < N; col++) {
        float dot = 0.0 ;
#pragma acc loop seq
        for (kk = 0; kk < P; kk++) {
          dot += b[row*P + kk] * c[kk*N + col] ;
        }
        a[row*N + col] = dot ;
      }
    }
  }
#ifdef _OPENACCM
  acc_shutdown(acc_device_default);
#endif
}
// Compute a = b * c, where a is M x N, b is M x P, and c is P x N, all in
// flat row-major arrays (M, N, P are file-level globals).  The outer row
// loop is split across the OpenMP team.
//
// Fix: removed the local "chunk" (N/4) -- it was computed and listed in the
// shared() clause but never used by any schedule or loop.
void
MatrixMultiplication_openmp(float * restrict a,float * restrict b, float * restrict c)
{
  int i, j, k ;
#pragma omp parallel shared(a,b,c) private(i,j,k)
  {
#ifdef _OPENMP
    // report the team size once, from thread 0
    if(omp_get_thread_num() == 0) {
      printf("Number of OpenMP threads %d\n", omp_get_num_threads());
    }
#endif
#pragma omp for
    for (i=0; i<M; i++){
      for (j=0; j<N; j++)
      {
        float sum = 0.0 ;
        for (k=0; k<P; k++)
          sum += b[i*P+k]*c[k*N+j] ;
        a[i*N+j] = sum ;
      }
    }
  }
}
// Driver: allocate the three matrices, fill b with 0..M*P-1 and c with ones,
// then time the OpenMP and OpenACC kernels on the same operands (a is
// overwritten by each call).
//
// Fix: the original used the results of posix_memalign/malloc without
// checking them; a failed allocation now reports an error and exits instead
// of dereferencing an invalid pointer.
int main()
{
  float *a, *b, *c;
#if HOST_MEM_ALIGNMENT == 1
  void *p;
#endif
  int i;
  double elapsed_time;
#if HOST_MEM_ALIGNMENT == 1
  // posix_memalign returns nonzero on failure and leaves p unspecified.
  // (N*N is used for all three buffers; M == P == N at startup, so this
  // matches the M*N / M*P / P*N shapes used below.)
  if (posix_memalign(&p, AOCL_ALIGNMENT, N*N*sizeof(float)) != 0) {
    fprintf(stderr, "posix_memalign failed for a\n");
    return 1;
  }
  a = (float *)p;
  if (posix_memalign(&p, AOCL_ALIGNMENT, N*N*sizeof(float)) != 0) {
    fprintf(stderr, "posix_memalign failed for b\n");
    return 1;
  }
  b = (float *)p;
  if (posix_memalign(&p, AOCL_ALIGNMENT, N*N*sizeof(float)) != 0) {
    fprintf(stderr, "posix_memalign failed for c\n");
    return 1;
  }
  c = (float *)p;
#else
  a = (float *) malloc(M*N*sizeof(float));
  b = (float *) malloc(M*P*sizeof(float));
  c = (float *) malloc(P*N*sizeof(float));
  if (a == NULL || b == NULL || c == NULL) {
    // free(NULL) is a no-op, so unconditional frees are safe
    fprintf(stderr, "out of memory\n");
    free(a);
    free(b);
    free(c);
    return 1;
  }
#endif
  for (i = 0; i < M*N; i++) {
    a[i] = (float) 0.0;
  }
  for (i = 0; i < M*P; i++) {
    b[i] = (float) i;
  }
  for (i = 0; i < P*N; i++) {
    c[i] = (float) 1.0;
  }
  elapsed_time = my_timer();
  MatrixMultiplication_openmp(a,b,c);
  elapsed_time = my_timer() - elapsed_time;
  printf("CPU Elapsed time = %lf sec\n", elapsed_time);
  elapsed_time = my_timer();
  MatrixMultiplication_openacc(a,b,c);
  elapsed_time = my_timer() - elapsed_time;
  printf("Accelerator Elapsed time = %lf sec\n", elapsed_time);
  free(a);
  free(b);
  free(c);
  return 0;
}
|
omp_thread_limit.c | // RUN: %libomp-compile && env OMP_THREAD_LIMIT=4 %libomp-run 4
// RUN: %libomp-compile && env OMP_THREAD_LIMIT=7 %libomp-run 7
//
// OMP_THREAD_LIMIT=N should imply that no more than N threads are active in
// a contention group
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include "omp_testsuite.h"
int failed = 0;
// Print the command-line usage string to stderr.
void usage() {
  fputs("usage: omp_thread_limit <n>\n", stderr);
}
// Check that the current parallel region has exactly team_size threads.
// On mismatch, record the failure in the global flag and print a message
// tagged with the call site; the critical section keeps concurrent reports
// from interleaving.
void verify(const char* file_name, int line_number, int team_size) {
  int observed = omp_get_num_threads();
  if (observed == team_size)
    return;
#pragma omp critical(A)
  {
    char where[256];
    snprintf(where, sizeof(where), "%s:%d", file_name, line_number);
    failed = 1;
    printf("failed: %s: team_size(%d) != omp_get_num_threads(%d)\n",
           where, team_size, observed);
  }
}
// Harness for OMP_THREAD_LIMIT=<n>: the runtime must report <n> via
// omp_get_thread_limit(), and no parallel region (nested or not) may run
// more than <n> threads in this contention group.
//
// Fix: the thread-limit error message read "should be%d" (missing space).
int main(int argc, char** argv)
{
  int cl_thread_limit;
  if (argc != 2) {
    usage();
    return 1;
  }
  cl_thread_limit = atoi(argv[1]);
  omp_set_dynamic(0);
  if (omp_get_thread_limit() != cl_thread_limit) {
    fprintf(stderr, "omp_get_thread_limit failed with %d, should be %d\n",
            omp_get_thread_limit(), cl_thread_limit);
    return 1;
  }
  else if (omp_get_max_threads() > cl_thread_limit) {
#if _OPENMP
    int team_size = cl_thread_limit;
#else
    int team_size = 1;
#endif
    // request more threads than the limit allows; the team must be
    // capped at the thread limit
    omp_set_num_threads(19);
    verify(__FILE__, __LINE__, 1);
#pragma omp parallel
    {
      verify(__FILE__, __LINE__, team_size);
      verify(__FILE__, __LINE__, team_size);
    }
    verify(__FILE__, __LINE__, 1);
    omp_set_nested(1);
#pragma omp parallel num_threads(3)
    {
      verify(__FILE__, __LINE__, 3);
#pragma omp master
      // NOTE(review): the inner team is expected to be team_size-2,
      // presumably because the two other outer-team threads count
      // against the limit -- confirm against the runtime's behavior
#pragma omp parallel num_threads(21)
      {
        verify(__FILE__, __LINE__, team_size-2);
        verify(__FILE__, __LINE__, team_size-2);
      }
    }
    verify(__FILE__, __LINE__, 1);
    return failed;
  } else {
    fprintf(stderr, "This test is not applicable for max num_threads='%d'\n",
            omp_get_max_threads());
    return 0;
  }
}
|
convolution_winograd_transform_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6, 3x3) input transform, pack-4 (float32x4) layout, NEON.
// For every 8x8 input window (6x6 output tile + 2 pixels of overlap for the
// 3x3 kernel) this applies the 8x8 transform matrix B^T along rows into the
// stack buffer tmp[][][], then along columns into bottom_blob_tm, which holds
// the 64 transform coefficients of each tile per input channel.
static void conv3x3s1_winograd63_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // Tile grid: each tile produces 6x6 outputs and consumes an 8x8 window.
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // Factored form of the rows above (common subexpressions shared):
    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25
    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // Row-transformed 8x8 tile, 4 packed floats per element.
        float tmp[8][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Pass 1: transform the 8 rows of the input window.
                const float* r0 = img0.row(i * 6) + (j * 6) * 4;

                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);
                    float32x4_t _r06 = vld1q_f32(r0 + 24);
                    float32x4_t _r07 = vld1q_f32(r0 + 28);

                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[7][m], _tmp7m);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);
                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);

                    r0 += w * 4;
                }

                // Pass 2: transform the columns of tmp and scatter each of the
                // 8 coefficient rows to its strided destination in img0_tm.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;

                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    // Same factored B^T as pass 1, applied column-wise.
                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);
                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);

                    // Advance by one coefficient-row group (8 rows * tiles * 4).
                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) output transform, pack-4 (float32x4) layout, NEON.
// Applies the 6x8 transform matrix A^T (commented below) to each 8x8 block of
// transform-domain coefficients — rows first into tmp[][][], then columns —
// producing the final 6x6 spatial output tile, with the per-channel bias
// added in the second pass.
static void conv3x3s1_winograd63_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f,-32.0f,  1.0f, -1.0f, 1.0f}
    // };

    // Factored form (sums/differences shared between even/odd outputs):
    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // Bias broadcast per output channel; zero vector when bias is absent.
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // Row-transformed 6x8 intermediate, 4 packed floats per element.
        float tmp[6][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Pass 1: gather this tile's 8 coefficient rows (strided by
                // tiles*4 per row) and apply A^T along rows.
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;

                float* output0 = out0.row(i * 6) + (j * 6) * 4;

                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);

                    // Shared pair sums (for even outputs) and differences
                    // (for odd outputs).
                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }

                // Pass 2: apply A^T along columns, add the bias, and write the
                // 6x6 spatial tile into the output channel.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 16, _out04);

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 12, _out03);
                    vst1q_f32(output0 + 20, _out05);

                    output0 += outw * 4;
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) input transform, pack-4 (float32x4) layout, NEON.
// Applies the 6x6 transform matrix B^T (commented below) to each 6x6 input
// window (4x4 output tile + 2 overlap pixels): rows into tmp[][][], then
// columns into bottom_blob_tm, which stores the 36 transform coefficients of
// every tile contiguously per input channel.
static void conv3x3s1_winograd43_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // Tile grid: each tile produces 4x4 outputs and consumes a 6x6 window.
    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // Factored form of the rows above:
    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // Row-transformed 6x6 tile, 4 packed floats per element.
        float tmp[6][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Pass 1: transform the 6 rows of the input window.
                const float* r0 = img0.row(i * 4) + (j * 4) * 4;

                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);

                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    r0 += w * 4;
                }

                // Pass 2: transform the columns of tmp and scatter each of the
                // 6 coefficient rows to its strided destination in img0_tm.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;

                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    // Same factored B^T as pass 1, applied column-wise.
                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);

                    // Advance by one coefficient-row group (6 rows * tiles * 4).
                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) output transform, pack-4 (float32x4) layout, NEON.
// Applies the 4x6 transform matrix A^T (commented below) to each 6x6 block of
// transform-domain coefficients — rows into tmp[][][], then columns —
// producing the final 4x4 spatial output tile with the per-channel bias added
// in the second pass.
static void conv3x3s1_winograd43_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // Factored form (pair sums/differences shared between outputs):
    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // Bias broadcast per output channel; zero vector when bias is absent.
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // Row-transformed 4x6 intermediate, 4 packed floats per element.
        float tmp[4][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // Pass 1: gather this tile's 6 coefficient rows (strided by
                // tiles*4 per row) and apply A^T along rows.
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;

                float* output0 = out0.row(i * 4) + (j * 4) * 4;

                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);

                    // Shared pair sums (even outputs) / differences (odd).
                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }

                // Pass 2: apply A^T along columns, add the bias, and write the
                // 4x4 spatial tile into the output channel.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 12, _out03);

                    output0 += outw * 4;
                }
            }
        }
    }
}
|
openmp_wrapper.h | /*!
* Copyright (c) 2017 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_OPENMP_WRAPPER_H_
#define LIGHTGBM_OPENMP_WRAPPER_H_
#ifdef _OPENMP
#include <LightGBM/utils/log.h>
#include <omp.h>
#include <exception>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>
/*!
 * \brief Query how many threads an OpenMP parallel region would use.
 * Opens a throwaway parallel region and lets the master thread record the
 * team size; returns 1 if the region does not parallelize.
 */
inline int OMP_NUM_THREADS() {
  int ret = 1;
#pragma omp parallel
#pragma omp master
  { ret = omp_get_num_threads(); }
  return ret;
}
/*!
 * \brief Collects the first exception thrown inside an OpenMP parallel region
 * and rethrows it after the region ends (propagating an exception across an
 * OpenMP construct boundary is not allowed, so workers stash it here instead).
 */
class ThreadExceptionHelper {
 public:
  ThreadExceptionHelper() {
    ex_ptr_ = nullptr;
  }
  /*!
   * \brief Rethrows a captured exception that was never explicitly collected.
   * noexcept(false): rethrowing from the destructor is this type's intended
   * contract; without it the implicit noexcept would call std::terminate.
   */
  ~ThreadExceptionHelper() noexcept(false) {
    ReThrow();
  }
  /*!
   * \brief Rethrow the captured exception, if any.
   * Clears the stored pointer before rethrowing so the destructor does not
   * rethrow the same exception a second time after the caller has already
   * handled it (the original left ex_ptr_ set, which made the destructor
   * throw again and terminate the program).
   */
  void ReThrow() {
    if (ex_ptr_ != nullptr) {
      std::exception_ptr ex = ex_ptr_;
      ex_ptr_ = nullptr;
      std::rethrow_exception(ex);
    }
  }
  /*! \brief Store std::current_exception(); only the first capture wins. */
  void CaptureException() {
    // Take the lock before the first check: the original's unlocked fast-path
    // read of ex_ptr_ raced with concurrent writers (a data race on a
    // non-atomic std::exception_ptr).
    std::unique_lock<std::mutex> guard(lock_);
    if (ex_ptr_ != nullptr) { return; }  // only catch first exception.
    ex_ptr_ = std::current_exception();
  }

 private:
  std::exception_ptr ex_ptr_;  // first captured exception; null when none
  std::mutex lock_;            // guards ex_ptr_ across worker threads
};
#define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper
#define OMP_LOOP_EX_BEGIN() try {
#define OMP_LOOP_EX_END() \
} \
catch (std::exception & ex) { \
Log::Warning(ex.what()); \
omp_except_helper.CaptureException(); \
} \
catch (...) { \
omp_except_helper.CaptureException(); \
}
#define OMP_THROW_EX() omp_except_helper.ReThrow()
#else
/*
* To be compatible with openmp, define a nothrow macro which is used by gcc
* openmp, but not by clang.
* See also https://github.com/dmlc/dmlc-core/blob/3106c1cbdcc9fc9ef3a2c1d2196a7a6f6616c13d/include/dmlc/omp.h#L14
*/
#if defined(__clang__)
#undef __GOMP_NOTHROW
#define __GOMP_NOTHROW
#elif defined(__cplusplus)
#undef __GOMP_NOTHROW
#define __GOMP_NOTHROW throw()
#else
#undef __GOMP_NOTHROW
#define __GOMP_NOTHROW __attribute__((__nothrow__))
#endif
#ifdef _MSC_VER
#pragma warning(disable : 4068) // disable unknown pragma warning
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** Fall here if no OPENMP support, so just
simulate a single thread running.
All #pragma omp should be ignored by the compiler **/
inline void omp_set_num_threads(int) __GOMP_NOTHROW {} // NOLINT (no cast done here)
inline int omp_get_num_threads() __GOMP_NOTHROW {return 1;}
inline int omp_get_max_threads() __GOMP_NOTHROW {return 1;}
inline int omp_get_thread_num() __GOMP_NOTHROW {return 0;}
inline int OMP_NUM_THREADS() __GOMP_NOTHROW { return 1; }
#ifdef __cplusplus
} // extern "C"
#endif
#define OMP_INIT_EX()
#define OMP_LOOP_EX_BEGIN()
#define OMP_LOOP_EX_END()
#define OMP_THROW_EX()
#endif
#endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
|
GB_unop__identity_int64_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_uint32
// op(A') function: GB_unop_tran__identity_int64_uint32
// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int64_t) Ax [p] for all p: apply the IDENTITY unary operator with
// a uint32 -> int64 typecast, parallelized with a static OpenMP schedule.
// NOTE: this file is auto-generated (see header); keep edits to comments only.
GrB_Info GB_unop_apply__identity_int64_uint32
(
    int64_t *Cx,            // Cx and Ax may be aliased
    const uint32_t *Ax,
    int64_t anz,            // number of entries to convert
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        int64_t z = (int64_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A while typecasting uint32 -> int64.
// The actual loop body lives in the shared template GB_unop_transpose.c,
// specialized here via the GB_* macros defined above (phase 2 of 2).
// NOTE: this file is auto-generated (see header); keep edits to comments only.
GrB_Info GB_unop_tran__identity_int64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,        // per-slice row counts workspace
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                             // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
diag.c | /******************************************************************************
* *
* DIAG.C *
* *
* DIAGNOSTIC OUTPUT *
* *
******************************************************************************/
#include "decs.h"
// static double lnumin, lnumax, dlnu;
// Zero the per-log-interval superphoton step counters (and, for neutrino
// runs, the per-species counters) ahead of the next logging window.
void reset_log_variables() {
#if RADIATION
  step_tot = step_lost = step_made = step_abs = step_scatt = step_rec = 0;
  step_sent = step_rcvd = step_fail = 0;
  tracer_tot = 0;
#if RADIATION == RADTYPE_NEUTRINOS
  TYPELOOP rad_type_counts[itp] = 0.0;
#endif
#endif
}
// Zero all radiation accumulator arrays that are integrated between dumps
// (spectra, interaction counts, radiation moments, time-averaged opacities).
void reset_dump_variables() {
#if RADIATION
  memset(nuLnu, 0, (NULNU_IDX0)*NTH * NPHI * NU_BINS_SPEC * sizeof(double));
  memset(Nem, 0, N123G * sizeof(int));
  memset(Nabs, 0, N123G * sizeof(int));
  memset(Nsc, 0, N123G * sizeof(int));
  memset(Nem_phys, 0, RAD_NUM_TYPES * N123G * sizeof(double));
  memset(Nabs_phys, 0, RAD_NUM_TYPES * N123G * sizeof(double));
  memset(Jrad, 0, (MAXNSCATT + 2) * N123G * sizeof(double));
  memset(Esuper, 0, N123G * sizeof(double));
  memset(Nsuper, 0, N123G * sizeof(int));
  memset(radG_int, 0, RAD_NUM_TYPES * N123G * sizeof(double));
  memset(dtau_avg, 0, (RAD_SCATT_TYPES + 1) * N123G * sizeof(double));
  memset(en_int_avg, 0, (RAD_SCATT_TYPES + 1) * N123G * sizeof(double));
#endif
}
// Top-level diagnostics driver, dispatched on call_code:
//   DIAG_INIT  - open the energy log file (append on restart) and log
//   DIAG_LOG   - compute volume-integrated quantities and append a log row
//   DIAG_DUMP  - write a dump (and tracers) and reset dump accumulators
//   DIAG_FINAL - dump, reset, and log one last time
// Volume integrals and flux reductions are accumulated per MPI rank and
// combined with mpi_reduce / mpi_io_reduce before the I/O rank writes.
void diag(int call_code) {
  double U[NVAR], divb;
  double pp = 0.;
  double divbmax = 0.;
  double rmed = 0.;
  double e = 0.;
  struct of_geom *geom;
  struct of_state q;
  // Kept open across calls: opened on DIAG_INIT, appended to on each log.
  static FILE * ener_file;

  // Write diagnostics to dump directory
  char log_fnam[STRLEN];
  strcpy(log_fnam, dumpdir);
  strcat(log_fnam, "diag.out");

  if (call_code == DIAG_INIT) {
    // Set things up
    if (mpi_io_proc()) {
      if (is_restart) {
        ener_file = fopen(log_fnam, "a");
      } else {
        ener_file = fopen(log_fnam, "w");
      }
      if (ener_file == NULL) {
        fprintf(stderr, "error opening energy output file\n");
        exit(1);
      }
    }
    // dOmega = sin(theta) dtheta = -d cos(theta)
    /*#if RADIATION
      double dOtot = 0.;
      JSLOOP(0, N2-1) {
        KSLOOP(0, N3-1) {
          int i = N1+NG-1;
          double XL[NDIM], XR[NDIM], thL, thR;
          coord(i, j, k, FACE2, XL);
          coord(i, j+1, k, FACE2, XR);
          thL = th_of_X(XL);
          thR = th_of_X(XR);
          dOmega[j][k] = 2.*M_PI/(N3TOT)*(-cos(thR) + cos(thL));
          dOtot += dOmega[j][k];
        }
      }
    #endif // RADIATION*/
  }

  // Calculate conserved quantities (skipped after a failure so we do not
  // run primtoflux on bad primitives).
  if ((call_code == DIAG_INIT || call_code == DIAG_LOG ||
          call_code == DIAG_FINAL) &&
      !failed) {
    pp = 0.;
    e = 0.;
    rmed = 0.;
    divbmax = 0.;
    ZSLOOP(0, N1 - 1, 0, N2 - 1, 0, N3 - 1) {
      geom = get_geometry(i, j, k, CENT);
      get_state(P[i][j][k], geom, &q);
      primtoflux(P[i][j][k], &q, 0, 0, geom, U);
      // Volume integrals of rest mass, angular momentum, and energy.
      rmed += U[RHO] * dV;
      pp += U[U3] * dV;
      e += U[UU] * dV;
      // Track the worst constrained-transport divergence on this rank.
      divb = flux_ct_divb(i, j, k);
      if (divb > divbmax) {
        divbmax = divb;
      }
    }
  }

  rmed = mpi_io_reduce(rmed);
  pp = mpi_io_reduce(pp);
  e = mpi_io_reduce(e);
  divbmax = mpi_io_max(divbmax);

#if RADIATION
  set_Rmunu();
#endif

  // Get total mass and energy
  double mass_proc = 0.;
  double egas_proc = 0.;
#if RADIATION
  double erad_proc = 0.;
#endif
  double Phi_proc = 0.;
  double jet_EM_flux_proc = 0.;
  double lum_eht_proc = 0.;
  ZLOOP {
#if EOS == EOS_TYPE_TABLE
    EOS_SC_fill(P[i][j][k], extra[i][j][k]);
#endif
    struct of_state q;
    double U[NVAR];
    get_state(P[i][j][k], &(ggeom[i][j][CENT]), &q);
    primtoflux(P[i][j][k], &q, 0, 0, &(ggeom[i][j][CENT]), U);
    mass_proc += U[0] * dx[1] * dx[2] * dx[3];
    egas_proc += U[1] * dx[1] * dx[2] * dx[3];
    double rho = P[i][j][k][RHO];
    double ug = P[i][j][k][UU];
    double Pg = EOS_pressure_rho0_u(rho, ug, extra[i][j][k]);
    double bsq = dot(q.bcon, q.bcov);
    double Bmag = sqrt(bsq);
    // EHT-style synthetic synchrotron emissivity proxy (see the EHT code
    // comparison conventions); integrated below into lum_eht.
    double C_eht = 0.2;
    double j_eht = pow(rho, 3.) * pow(Pg, -2.) *
                   exp(-C_eht * pow(rho * rho / (Bmag * Pg * Pg), 1. / 3.));
    lum_eht_proc += j_eht * dx[1] * dx[2] * dx[3] * ggeom[i][j][CENT].g;
    // Horizon-adjacent shell (i == 5 + NG on the innermost rank): magnetic
    // flux through the horizon and EM jet power in highly magnetized zones.
    if (global_start[1] == 0 && i == 5 + NG) {
      Phi_proc +=
          0.5 * fabs(P[i][j][k][B1]) * dx[2] * dx[3] * ggeom[i][j][CENT].g;
      double P_EM[NVAR];
      PLOOP P_EM[ip] = P[i][j][k][ip];
      P_EM[RHO] = 0.;
      P_EM[UU] = 0.;
      get_state(P_EM, &(ggeom[i][j][CENT]), &q);
      double sig = dot(q.bcon, q.bcov) / P[i][j][k][RHO];
      if (sig > 1.) {
        primtoflux(P_EM, &q, 1, 1, &(ggeom[i][j][CENT]), U);
        jet_EM_flux_proc += -U[U1] * dx[2] * dx[3];
      }
    }
#if RADIATION
    erad_proc +=
        Rmunu[i][j][k][0][0] * dx[1] * dx[2] * dx[3] * ggeom[i][j][CENT].g;
#endif
  }
  double mdot_all = mpi_io_reduce(mdot);
  double edot_all = mpi_io_reduce(edot);
  double ldot_all = mpi_io_reduce(ldot);
  double mdot_eh_all = mpi_io_reduce(mdot_eh);
  double edot_eh_all = mpi_io_reduce(edot_eh);
  double ldot_eh_all = mpi_io_reduce(ldot_eh);
  double mass = mpi_reduce(mass_proc);
  double egas = mpi_reduce(egas_proc);
  double Phi = mpi_reduce(Phi_proc);
  // Normalized (MAD-parameter) magnetic flux; SMALL avoids divide-by-zero.
  double phi = Phi / sqrt(mdot_all + SMALL);
  double jet_EM_flux = mpi_reduce(jet_EM_flux_proc);
  double lum_eht = mpi_reduce(lum_eht_proc);
#if RADIATION
  double erad = mpi_reduce(erad_proc);
  double lum_proc = 0.;
  // Get last shell entirely enclosed by stopx_rad[1] (assume r(X^1) independent
  // of X^2, X^3)
  int stopi_rad = -1;
  for (int i = NG + 1; i <= N1 + NG; i++) {
    double X[NDIM], Xprev[NDIM];
    coord(i - 1, NG, NG, FACE1, Xprev);
    coord(i, NG, NG, FACE1, X);
    if (X[1] > stopx_rad[1] && Xprev[1] < stopx_rad[1]) {
      stopi_rad = i - 2;
    }
  }
  if (stopi_rad == -1)
    stopi_rad = N1 + NG - 1;
  if (stopi_rad >= 0) {
    // Radiative luminosity: radial radiation energy flux through that shell.
    JSLOOP(0, N2 - 1) {
      KSLOOP(0, N3 - 1) {
        lum_proc -= Rmunu[stopi_rad][j][k][1][0] * dx[2] * dx[3] *
                    ggeom[stopi_rad][j][CENT].g;
      }
    }
  }
  double lum = mpi_reduce(lum_proc);
  double eff = lum / (mdot + SMALL);
#if ELECTRONS
  int num_super = 0.;
  double lum_super = 0.;
  ZLOOP {
    num_super += Nsuper[i][j][k];
    lum_super += Esuper[i][j][k];
  }
  num_super = mpi_reduce_int(num_super);
  lum_super = mpi_reduce(lum_super) / DTd;
#endif
#endif // RADIATION

  if (call_code == DIAG_DUMP) {
    dump();
#if RADIATION && TRACERS
    dump_tracers();
#endif
    dump_cnt++;
    reset_dump_variables();
  }

  if (call_code == DIAG_FINAL) {
    dump();
#if RADIATION && TRACERS
    dump_tracers();
#endif
    reset_dump_variables();
  }

  // Append one row of scalar diagnostics to diag.out (I/O rank only).
  if (call_code == DIAG_INIT || call_code == DIAG_LOG ||
      call_code == DIAG_FINAL) {
    if (mpi_io_proc()) {
#if EOS == EOS_TYPE_TABLE
      EOS_SC_fill(P[N1 / 2][N2 / 2][N3 / 2], extra[N1 / 2][N2 / 2][N3 / 2]);
#endif
      fprintf(stdout, "LOG      t=%g \t divbmax: %g\n", t, divbmax);
#if RADIATION
#if RADIATION == RADTYPE_NEUTRINOS
      fprintf(stdout, "    nleptons = %g \t %%change = %g\n", lepton_tot,
          dlepton_perc);
#endif
#endif
      fprintf(ener_file, "%10.5g %10.5g %10.5g %10.5g %15.8g %15.8g ", t, rmed,
          pp, e,
          EOS_adiabatic_constant(P[N1 / 2][N2 / 2][N3 / 2][RHO],
              P[N1 / 2][N2 / 2][N3 / 2][UU], extra[N1 / 2][N2 / 2][N3 / 2]),
          P[N1 / 2][N2 / 2][N3 / 2][UU]);
      fprintf(ener_file, "%15.8g %15.8g %15.8g ", mdot_all, edot_all, ldot_all);
      fprintf(ener_file, "%15.8g %15.8g ", mass, egas);
      fprintf(ener_file, "%15.8g %15.8g %15.8g ", Phi, phi, jet_EM_flux);
      fprintf(ener_file, "%15.8g ", divbmax);
#if RADIATION
      printf("step_abs = %i step_abs_tot = %i\n", step_abs, step_abs_all);
      fprintf(ener_file, "%i %i %i %i %i %i %i %i ", step_made, step_abs,
          step_scatt, step_lost, step_rec, step_tot, step_sent, step_rcvd);
      fprintf(ener_file, "%i %i %i %i %i %i %i %i ", step_made_all,
          step_abs_all, step_scatt_all, step_lost_all, step_rec_all,
          step_tot_all, step_sent_all, step_rcvd_all);
      fprintf(ener_file, "%15.8g ", load_imbalance);
      fprintf(ener_file, "%15.8g %15.8g ", tune_emiss, tune_scatt);
      fprintf(ener_file, "%15.8g %15.8g %15.8g ", erad, lum, eff);
#if ELECTRONS
      fprintf(ener_file, "%i %15.8g ", num_super, lum_super);
#endif
#if RADIATION == RADTYPE_NEUTRINOS
      fprintf(ener_file, "%15.8g %15.8g %15.8g ", lepton_tot, dlepton_tot,
          dlepton_perc);
#endif
#if TRACERS
      fprintf(ener_file, "%i ", tracer_tot_all);
      fprintf(ener_file, "%i ", step_tot_all - tracer_tot_all);
#endif
#endif
      fprintf(ener_file, "%15.8g ", lum_eht);
      fprintf(ener_file, "%15.8g %15.8g %15.8g ", mdot_eh_all, edot_eh_all,
          ldot_eh_all);
      // Per-step timer breakdown for performance monitoring.
      fprintf(ener_file,
          "%15.8g %15.8g %15.8g %15.8g "
          "%15.8g %15.8g %15.8g %15.8g %15.8g %15.8g %15.8g ",
          get_time_per_step(TIMER_UPDATE), get_time_per_step(TIMER_FLUXCALC),
          get_time_per_step(TIMER_FIXUP), get_time_per_step(TIMER_BOUND),
          get_time_per_step(TIMER_DIAG), get_time_per_step(TIMER_OUT),
          get_time_per_step(TIMER_MAKE), get_time_per_step(TIMER_PUSH),
          get_time_per_step(TIMER_INTERACT), get_time_per_step(TIMER_MICRO),
          get_time_per_step(TIMER_ALL));
#if ELECTRONS
      fprintf(ener_file, "%15.8g ", get_time_per_step(TIMER_ELECTRON));
#endif
      fprintf(ener_file, "\n");
      fflush(ener_file);
    }
#if RADIATION
    track_ph();
#endif
  }
}
// Diagnostic routines
// Fatal-error handler: report the failing zone, print a local map of the
// primitives around it, emit final diagnostics, and exit.
void fail(int fail_type) {
  failed = 1;
  fprintf(stderr, "\n\nFAIL: [%d %d %d] %d\n", icurr, jcurr, kcurr, fail_type);
  area_map(icurr, jcurr, kcurr, P);
  diag(DIAG_FINAL);
  exit(0);
}
// Map out region around failure point
// Map out region around failure point: print a 3x3 neighborhood (in i, j at
// fixed k) of every primitive variable to stderr for post-mortem debugging.
void area_map(int i, int j, int k, grid_prim_type prim) {
  fprintf(stderr, "*** AREA MAP ***\n");

  PLOOP {
    fprintf(stderr, "variable %d \n", ip);
    fprintf(stderr, "i = \t %12d %12d %12d\n", i - 1, i, i + 1);
    fprintf(stderr, "j = %d \t %12.5g %12.5g %12.5g\n", j + 1,
        prim[i - 1][j + 1][k][ip], prim[i][j + 1][k][ip],
        prim[i + 1][j + 1][k][ip]);
    fprintf(stderr, "j = %d \t %12.5g %12.5g %12.5g\n", j,
        prim[i - 1][j][k][ip], prim[i][j][k][ip], prim[i + 1][j][k][ip]);
    fprintf(stderr, "j = %d \t %12.5g %12.5g %12.5g\n", j - 1,
        prim[i - 1][j - 1][k][ip], prim[i][j - 1][k][ip],
        prim[i + 1][j - 1][k][ip]);
  }

  fprintf(stderr, "****************\n");
}
// Evaluate flux based diagnostics; put results in global variables
// Evaluate flux based diagnostics; put results in global variables.
// Integrates mass, energy, and angular-momentum fluxes through two radial
// surfaces on the innermost rank: the inner boundary face (i == NG) and a
// near-horizon face (i == NG + 5). Results go into the globals mdot/edot/
// ldot and mdot_eh/edot_eh/ldot_eh; other ranks leave them zero.
void diag_flux(grid_prim_type F1, grid_prim_type F2, grid_prim_type F3) {
  mdot = edot = ldot = 0.;
  mdot_eh = edot_eh = ldot_eh = 0.;
  int iEH = NG + 5;
  if (global_start[1] == 0) {
#pragma omp parallel for \
    reduction(+:mdot) reduction(+:edot) reduction(+:ldot) \
    reduction(+:mdot_eh) reduction(+:edot_eh) reduction(+:ldot_eh) \
    collapse(2)
    JSLOOP(0, N2 - 1) {
      KSLOOP(0, N3 - 1) {
        mdot += F1[NG][j][k][RHO] * dx[2] * dx[3];
        // Energy flux with rest-mass contribution subtracted.
        edot += (F1[NG][j][k][UU] - F1[NG][j][k][RHO]) * dx[2] * dx[3];
        ldot += F1[NG][j][k][U3] * dx[2] * dx[3];
        mdot_eh += F1[iEH][j][k][RHO] * dx[2] * dx[3];
        edot_eh += (F1[iEH][j][k][UU] - F1[iEH][j][k][RHO]) * dx[2] * dx[3];
        ldot_eh += F1[iEH][j][k][U3] * dx[2] * dx[3];
      }
    }
  }
}
// Corner-centered |div B| at zone corner (i, j, k) for the flux-CT scheme:
// each field component is averaged over the four zones sharing the corner
// face (weighted by the metric determinant ggeom.g) and differenced across
// the corresponding coordinate direction.
double flux_ct_divb(int i, int j, int k) {
  return fabs(
      // d(B1 g)/dx1, averaged over the four j/k-neighboring zones
      0.25 *
          (P[i][j][k][B1] * ggeom[i][j][CENT].g +
              P[i][j - 1][k][B1] * ggeom[i][j - 1][CENT].g +
              P[i][j][k - 1][B1] * ggeom[i][j][CENT].g +
              P[i][j - 1][k - 1][B1] * ggeom[i][j - 1][CENT].g -
              P[i - 1][j][k][B1] * ggeom[i - 1][j][CENT].g -
              P[i - 1][j - 1][k][B1] * ggeom[i - 1][j - 1][CENT].g -
              P[i - 1][j][k - 1][B1] * ggeom[i - 1][j][CENT].g -
              P[i - 1][j - 1][k - 1][B1] * ggeom[i - 1][j - 1][CENT].g) /
          dx[1] +
      // d(B2 g)/dx2, averaged over the four i/k-neighboring zones
      0.25 *
          (P[i][j][k][B2] * ggeom[i][j][CENT].g +
              P[i - 1][j][k][B2] * ggeom[i - 1][j][CENT].g +
              P[i][j][k - 1][B2] * ggeom[i][j][CENT].g +
              P[i - 1][j][k - 1][B2] * ggeom[i - 1][j][CENT].g -
              P[i][j - 1][k][B2] * ggeom[i][j - 1][CENT].g -
              P[i - 1][j - 1][k][B2] * ggeom[i - 1][j - 1][CENT].g -
              P[i][j - 1][k - 1][B2] * ggeom[i][j - 1][CENT].g -
              P[i - 1][j - 1][k - 1][B2] * ggeom[i - 1][j - 1][CENT].g) /
          dx[2] +
      // d(B3 g)/dx3, averaged over the four i/j-neighboring zones
      0.25 *
          (P[i][j][k][B3] * ggeom[i][j][CENT].g +
              P[i][j - 1][k][B3] * ggeom[i][j - 1][CENT].g +
              P[i - 1][j][k][B3] * ggeom[i - 1][j][CENT].g +
              P[i - 1][j - 1][k][B3] * ggeom[i - 1][j - 1][CENT].g -
              P[i][j][k - 1][B3] * ggeom[i][j][CENT].g -
              P[i][j - 1][k - 1][B3] * ggeom[i][j - 1][CENT].g -
              P[i - 1][j][k - 1][B3] * ggeom[i - 1][j][CENT].g -
              P[i - 1][j - 1][k - 1][B3] * ggeom[i - 1][j - 1][CENT].g) /
          dx[3]);
}
#if RADIATION
// Deposit one superphoton into the angular/spectral luminosity histogram
// nuLnu (binned by frequency, polar angle, azimuth) and increment the
// recorded-photon counter step_rec. Tracer particles are skipped.
// X: coordinate position used for spatial binning; ph: the superphoton.
void record_superphoton(double X[NDIM], struct of_photon *ph) {
// Do not do this for tracers
if (ph->type == TYPE_TRACER)
return;
// Logarithmic frequency grid spanning [numin, numax] with NU_BINS_SPEC bins.
double lnumin = log(numin);
double lnumax = log(numax);
double dlnu = (lnumax - lnumin) / NU_BINS_SPEC;
int i, j, k;
Xtoijk(X, &i, &j, &k);
int nscatt = ph->nscatt;
nscatt = MY_MIN(nscatt, MAXNSCATT);
// Preserve index sanity
// Map ghost-zone indices back into the physical range: j (X2) wraps; k (X3)
// wraps only when a single rank owns the whole phi direction, else clamps.
if (j < NG)
j = N2 + j;
if (j >= N2 + NG)
j = j - N2;
if (k < NG) {
if (N3CPU == 1)
k = N3 + k;
else
k = NG;
}
if (k >= N3 + NG) {
if (N3CPU == 1)
k = k - N3;
else
k = N3 + NG - 1;
}
// Assume X0 symmetry in metric
// Photon frequency from the covariant time component of the wavevector.
double nu = -ph->Kcov[2][0] * ME * CL * CL / HPL;
int thbin, phibin, nubin = (log(nu) - lnumin) / dlnu;
get_nuLnu_bin(X, &thbin, &phibin);
// Store dE / dlognu dOmega dt
// Out-of-range frequencies are silently dropped.
if (nubin >= 0 && nubin < NU_BINS_SPEC) {
#if DIAGNOSTICS_USE_RADTYPES
{
#pragma omp atomic
nuLnu[ph->type][thbin][phibin][nubin] -=
ph->w * ph->Kcov[2][0] * ME * CL * CL / (dlnu * DTd * T_unit);
}
#else
{
#pragma omp atomic
nuLnu[nscatt][thbin][phibin][nubin] -=
ph->w * ph->Kcov[2][0] * ME * CL * CL / (dlnu * DTd * T_unit);
}
#endif
#pragma omp atomic
step_rec++;
}
}
// Print the Monte Carlo load-balance summary on the I/O rank only.
// load_imbalance is 0 for a perfectly balanced step and approaches 1 as
// the gap between the least- and most-loaded ranks grows.
void report_load_imbalance() {
  if (!mpi_io_proc())
    return;
  fprintf(stdout,
      "\n******** LOAD IMBALANCE *********\n"
      " good == 0 <= %.2f <= 1 == bad\n"
      " step_tot_min = %d\n"
      " step_tot_max = %d\n"
      "*********************************\n\n",
      load_imbalance, step_tot_min, step_tot_max);
}
void bin_all_superphotons() {
#pragma omp parallel
{
struct of_photon *ph = photon_lists[omp_get_thread_num()];
while (ph != NULL) {
if (ph->type != TYPE_TRACER) {
bin_superphoton_direction(ph);
}
ph = ph->next;
}
}
}
#if RADIATION == RADTYPE_NEUTRINOS
// Global lepton-number conservation diagnostic for neutrino transport.
// Sums the lepton number carried by the gas (from RHO and YE), by the
// radiation field (superphoton weight times lepton sign), and lost through
// boundaries, then records the absolute and percentage change since the
// previous call. P: primitives; dt: diagnostic interval; nstep: step count.
void count_leptons(grid_prim_type P, double dt, int nstep) {
timer_start(TIMER_DIAG);
// Combine the per-rank boundary losses accumulated since the last call.
lepton_lost_step = mpi_reduce(lepton_lost_local);
// printf("lepton lost on %d step = %g\n", mpi_myrank(), lepton_lost_local);
// if (mpi_io_proc()) {
// printf("lepton lost this step = %g\n", lepton_lost_step);
// printf("lepton lost last = %g\n", lepton_lost);
// printf("lepton lost now = %g\n", lepton_lost + lepton_lost_step);
// }
lepton_last = lepton_tot;
lepton_lost += lepton_lost_step;
lepton_lost_local = 0.0;
lepton_gas = lepton_rad = 0.0;
// Coordinate cell volume in cgs; multiplied by sqrt(-g) per cell below.
double reference_vol = dx[1] * dx[2] * dx[3] * pow(L_unit, 3.);
#pragma omp parallel for collapse(3)
ZLOOP {
double gamma, alpha, ucon0;
mhd_gamma_calc(P[i][j][k], &(ggeom[i][j][CENT]), &gamma);
alpha = ggeom[i][j][CENT].alpha;
ucon0 = gamma / alpha;
// Lab-frame baryon number density, then lepton density via Ye.
double nb_cell = ucon0 * P[i][j][k][RHO] * RHO_unit / MP;
double n_cell = nb_cell * P[i][j][k][YE];
double cell_vol = reference_vol * ggeom[i][j][CENT].g;
double lepton_cell = n_cell * cell_vol;
#pragma omp atomic
lepton_gas += lepton_cell;
}
lepton_gas = mpi_reduce(lepton_gas);
// Radiation-field lepton number: each thread walks its own photon list.
#pragma omp parallel
{
struct of_photon *ph = photon_lists[omp_get_thread_num()];
while (ph != NULL) {
if (ph->type != TYPE_TRACER) {
#pragma omp atomic
lepton_rad += (ph->w) * get_lepton_sign(ph);
}
ph = ph->next;
}
}
lepton_rad = mpi_reduce(lepton_rad);
lepton_tot = lepton_rad + lepton_gas + lepton_lost;
dlepton_tot = (lepton_tot - lepton_last) / dt;
// NOTE(review): divides by lepton_tot -- yields inf/nan if the total is 0.
dlepton_perc = 100. * (lepton_tot - lepton_last) / lepton_tot;
// diagnostic currently only works when mpi is off. I don't know why.
#if COMPLAIN_ON_LEPTON_NONCON
if (nstep > 1 && fabs(dlepton_perc) > DLEPTON_THRESH && mpi_nprocs() > 1) {
if (mpi_io_proc()) {
double dlep_abs = (lepton_tot - lepton_last);
fprintf(stderr,
"Lepton number not conserved!\n"
"\tlepton_gas = %15.8g\n"
"\tlepton_rad = %15.8g\n"
"\tlepton_lost = %15.8g\n"
"\tlepton_tot = %15.8g\n"
"\tlepton_last = %15.8g\n"
"\tdlep_abs = %15.8g\n"
"\tdlepdt = %15.8g\n"
"\tdlepton_perc = %15.8g\n",
lepton_gas, lepton_rad, lepton_lost, lepton_tot, lepton_last,
dlep_abs, dlepton_tot, dlepton_perc);
}
// uncomment this when the code actually works
// NOTE(review): exit(1) is active despite the comment above -- confirm
// whether aborting here is intended.
exit(1);
}
#endif
timer_stop(TIMER_DIAG);
}
// Report the weighted population of each neutrino type (electron,
// anti-electron, heavy) as a percentage of the total superphoton weight
// across all threads and MPI ranks. Output is printed on the I/O rank.
void print_rad_types() {
  timer_start(TIMER_DIAG);
  double total_count = 0;
  TYPELOOP rad_type_counts[itp] = 0.0;
  // Accumulate superphoton weights per type; each thread walks its own list.
#pragma omp parallel
  {
    struct of_photon *ph = photon_lists[omp_get_thread_num()];
    while (ph != NULL) {
      if (ph->type != TYPE_TRACER) {
#pragma omp atomic
        rad_type_counts[ph->type] += ph->w;
      }
      ph = ph->next;
    }
  }
  TYPELOOP rad_type_counts[itp] = mpi_reduce(rad_type_counts[itp]);
  TYPELOOP total_count += rad_type_counts[itp];
  // Convert to percentages. Guard against division by zero when no
  // superphotons exist yet (previously produced NaN percentages).
  if (total_count > 0.) {
    TYPELOOP rad_type_counts[itp] /= total_count;
    TYPELOOP rad_type_counts[itp] *= 100.0;
  }
  if (mpi_io_proc()) {
    fprintf(stdout, "\n********* NEUTRINO TYPES ********\n");
    fprintf(stdout, " TOTAL COUNT = %.4g \n", total_count);
    fprintf(stdout, "*********************************\n");
    fprintf(stdout, " TYPE PERCENTAGE\n");
    fprintf(stdout, "*********************************\n");
    fprintf(stdout, " ELECTRON %.2f %%\n",
        rad_type_counts[NU_ELECTRON]);
    fprintf(stdout, " ANTI %.2f %%\n",
        rad_type_counts[ANTINU_ELECTRON]);
    fprintf(stdout, " HEAVY %.2f %%\n",
        rad_type_counts[NU_HEAVY]);
    fprintf(stdout, "*********************************\n\n");
  }
  timer_stop(TIMER_DIAG);
}
#endif
#endif // RADIATION
|
cpu_bound.c | /*
* Copyright (c) 2009, 2010, 2011, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <stdint.h>
#include <omp.h>
#include <arch/x86/barrelfish_kpi/asm_inlines_arch.h>
#define WORK_PERIOD 5000000000UL
#define STACK_SIZE (64 * 1024)
// Barrelfish OpenMP CPU-bound micro-benchmark.
// Modes:
//   no args         : calibrate -- grow workmax until one serial pass takes
//                     at least WORK_PERIOD cycles, scale by core count, print.
//   argv[1]         : run the parallel busy loop for workmax iterations.
//   argv[1] argv[2] : additionally span the domain across argv[2] threads.
// Never returns in benchmark mode (spins forever after printing the result).
int main(int argc, char *argv[])
{
    uint64_t now, start;
    // volatile keeps the busy-work increments from being optimized away;
    // workcnt is zero-initialized so threads never read an indeterminate
    // value (the old private(workcnt) copies were uninitialized -- UB).
    volatile uint64_t workcnt = 0, workload = 0;
    int64_t workmax = 1000;
    int64_t i;

    if(argc == 1) {
        printf("calibrating...\n");
        // Double workmax until one calibration pass lasts >= WORK_PERIOD cycles.
        do {
            workload = 0;
            workmax *= 2;
            start = rdtsc();
            for(i = 0; i < workmax; i++) {
                workload++;
            }
            now = rdtsc();
        } while(now - start < WORK_PERIOD);
        // Compute so the max number of CPUs would calc for WORK_PERIOD
        workmax *= omp_get_num_procs();
        printf("workmax = %ld\n", workmax);
        return 0;
    } else {
        workmax = atol(argv[1]);
    }

    int nthreads = omp_get_max_threads();
    if(argc == 3) {
        nthreads = atoi(argv[2]);
        backend_span_domain(nthreads, STACK_SIZE);
        bomp_custom_init(NULL);
        omp_set_num_threads(nthreads);
    }
    printf("threads %d, workmax %ld, CPUs %d\n", nthreads, workmax,
           omp_get_num_procs());

    start = rdtsc();
    // Do some work. firstprivate (not private) so each thread's copy starts
    // from the initialized zero above.
#pragma omp parallel for firstprivate(workcnt)
    for(i = 0; i < workmax; i++) {
        workcnt++;
    }
    now = rdtsc();

    // NOTE(review): %lu/%ld assume 64-bit long (LP64) -- use PRIu64/PRId64
    // from <inttypes.h> if this is ever ported to another ABI.
    printf("%s: threads %d, compute time %lu ticks\n", argv[0], nthreads, now - start);
    // Spin forever: Barrelfish benchmark domains are not expected to exit.
    for(;;);
    return 0;
}
|
task_untied.c | /*
* the task-generation loop is put into a untied task
* So when the thread running task-generation loop get preempted to
* conduct the generated tasks,
* the other threads can resume the task-generation task, which
* is not tied to the original thread.
*/
#include <stdio.h>
#include <omp.h>
#define LARGE_NUMBER 10
//#define LARGE_NUMBER 10000000
double item[LARGE_NUMBER];
// Report which thread handled the given work item.
void process (double input)
{
  int tid = omp_get_thread_num();
  printf("processing %f by thread %d\n", input, tid);
}
// Demonstrates an untied task generating many child tasks: if the thread
// running the generator loop is preempted to execute generated tasks, any
// other thread may resume the generator, because it is not tied.
int
main ()
{
#pragma omp parallel
{
#pragma omp single
{
int i;
printf("Using %d threads.\n",omp_get_num_threads());
/*
untied cannot be used with omp single
So another level of task is needed to use untied here!!
Explicit tasks:
* generated by #omp task
* tied or untied
Implicit tasks:
* generated by #omp parallel
* always tied!!
*/
#pragma omp task untied
// i is firstprivate, item is shared
{
// Spawn one child task per item; if(1) keeps normal deferred execution.
for (i = 0; i < LARGE_NUMBER; i++)
{
#pragma omp task if(1)
process (item[i]);
}
}
}
}
return 0;
}
|
taskwait.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
// OMPT regression test: the master spawns a task, then hits a taskwait; the
// CHECK directives below verify the taskwait begin/end callbacks report the
// return address of the taskwait construct. (CHECK lines are consumed by
// FileCheck -- do not edit them.)
int main()
{
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
// Child task increments x; the master waits for it at the taskwait.
#pragma omp task
{
x++;
}
#pragma omp taskwait
print_current_address(1);
}
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_taskwait_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_taskwait_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_taskwait_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: ompt_event_taskwait_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]
// CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
FFVMC.h | #ifndef FFVMC3D_H
#define FFVMC3D_H
#include <vector>
#include <fstream>
#include <sstream>
#include <iostream>
#include <sys/stat.h>
#include "BCMTools.h"
#include "BlockManager.h"
#include "Scalar3D.h"
#include "BCMOctree.h"
#include "Partition.h"
// One marching-cubes vertex: spatial position plus the scalar value carried
// into the VTK PointData output (set to the iso-surface threshold).
typedef struct _Vertex {
double x;
double y;
double z;
double value;
}Vertex;
extern const int edgeTable[256];
extern const int triTable[256][16];
extern Vertex GetIntersection(Vertex v1, Vertex v2, double p1, double p2, double threshold);
class FFVMC {
public:
FFVMC()
: blockManager(BlockManager::getInstance()),
comm(blockManager.getCommunicator()) {
int myrank = comm.Get_rank();
for(int id=0; id<blockManager.getNumBlock(); ++id) {
BlockBase* block = blockManager.getBlock(id);
}
if( myrank == 0 ) {
} else {
}
PNX = 0;
PNY = 0;
PNZ = 0;
pPointData = 0;
vVertexList.clear();
}
virtual ~FFVMC() {
}
private:
BlockManager& blockManager;
const MPI::Intracomm& comm;
public:
template <typename T>
void writeContour(
int dataClassID,
int vc,
const std::string path,
const std::string prefix,
const std::string name,
int step,
int maxLevel,
int minLevel,
RootGrid* rootGrid,
BCMOctree* tree,
Partition* partition,
Vec3r rootOrigin,
double rootLength,
double threshold) {
std::ostringstream ossFileNameTime;
ossFileNameTime << path;
mkdir(ossFileNameTime.str().c_str(), 0755);
ossFileNameTime << "/";
ossFileNameTime.width(10);
ossFileNameTime.setf(std::ios::fixed);
ossFileNameTime.fill('0');
ossFileNameTime << step;
mkdir(ossFileNameTime.str().c_str(), 0755);
const Vec3i& size = blockManager.getSize();
int myrank = comm.Get_rank();
for (int id = 0; id < blockManager.getNumBlock(); ++id) {
BlockBase* block = blockManager.getBlock(id);
Vec3i size = block->getSize();
Vec3r origin = block->getOrigin();
Vec3r blockSize = block->getBlockSize();
Vec3r cellSize = block->getCellSize();
int level = block->getLevel();
Scalar3D<T>* s = dynamic_cast<Scalar3D<T>*>(block->getDataClass(dataClassID));
T* sData = s->getData();
int nx = size.x;
int ny = size.y;
int nz = size.z;
int nv = vc;
double ox = origin.x;
double oy = origin.y;
double oz = origin.z;
double dx = cellSize.x;
writeContourLocal(
sData,
threshold,
path,
prefix,
name,
step, myrank, id,
nx, ny, nz,
nv,
ox, oy, oz,
dx);
}
if( myrank != 0 ) {
return;
}
std::ostringstream ossFileName;
ossFileName << path;
ossFileName << "/";
ossFileName << prefix;
ossFileName << name.c_str();
ossFileName << "-";
ossFileName.width(10);
ossFileName.setf(std::ios::fixed);
ossFileName.fill('0');
ossFileName << step;
ossFileName << ".pvtp";
std::ofstream ofs;
ofs.open(ossFileName.str().c_str(), std::ios::out);
ofs << "<VTKFile type=\"PPolyData\" version=\"0.1\">" << std::endl;
ofs << "<PPolyData GhostLevel=\"0\">" << std::endl;
ofs << "<PPointData>" << std::endl;
ofs << "<PDataArray type=\"Float32\" Name=\"";
ofs << name;
ofs << "\" format=\"ascii\"/>" << std::endl;
ofs << "</PPointData>" << std::endl;
ofs << "<PCellData>" << std::endl;
ofs << "</PCellData>" << std::endl;
ofs << "<PPoints>" << std::endl;
ofs << "<PDataArray type=\"Float32\" Name=\"Points\" NumberOfComponents=\"3\" format=\"ascii\"/>" << std::endl;
ofs << "</PPoints>" << std::endl;
std::vector<Node*>& leafNodeArray = tree->getLeafNodeArray();
for (int iRank = 0; iRank < comm.Get_size(); iRank++) {
for (int id = partition->getStart(iRank); id < partition->getEnd(iRank); id++) {
Node* node = leafNodeArray[id];
Vec3r origin = tree->getOrigin(node) * rootLength;
Vec3r blockSize = node->getBlockSize() * rootLength;
Vec3r cellSize;
cellSize.x = blockSize.x / size.x;
cellSize.y = blockSize.y / size.y;
cellSize.z = blockSize.z / size.z;
int level = node->getLevel();
std::ostringstream ossFileName2;
ossFileName2 << "./";
ossFileName2.width(10);
ossFileName2.setf(std::ios::fixed);
ossFileName2.fill('0');
ossFileName2 << step;
ossFileName2 << "/";
ossFileName2 << prefix;
ossFileName2 << name.c_str();
ossFileName2 << "-";
ossFileName2.width(5);
ossFileName2.setf(std::ios::fixed);
ossFileName2.fill('0');
ossFileName2 << iRank;
ossFileName2 << "-";
ossFileName2.width(5);
ossFileName2.setf(std::ios::fixed);
ossFileName2.fill('0');
ossFileName2 << id - partition->getStart(iRank);
ossFileName2 << "-";
ossFileName2.width(10);
ossFileName2.setf(std::ios::fixed);
ossFileName2.fill('0');
ossFileName2 << step;
ossFileName2 << ".vtp";
ofs << "<Piece Source=\"";
ofs << ossFileName2.str();
ofs << "\"/>" << std::endl;
}
}
ofs << "</PPolyData>" << std::endl;
ofs << "</VTKFile>" << std::endl;
ofs.close();
}
template <typename T>
void writeContourLocal(
T* pData,
double threshold,
const std::string path,
const std::string prefix,
const std::string name,
int step, int rank, int block,
int NX, int NY, int NZ,
int NV,
double ox, double oy, double oz,
double dx)
{
InitPointData(pData, NX, NY, NZ, NV);
ClearTriangles();
DetectTriangles(threshold, ox, oy, oz, dx);
PrintVTP(path, prefix, name, step, rank, block, threshold);
}
template <typename T>
void InitPointData(
T* pData,
int NX, int NY, int NZ,
int NV)
{
if( NX + 1 == PNX &&
NY + 1 == PNY &&
NZ + 1 == PNZ ) {
} else {
PNX = NX + 1;
PNY = NY + 1;
PNZ = NZ + 1;
if( pPointData ) {
delete [] pPointData;
pPointData = 0;
}
pPointData = new float [PNX*PNY*PNZ];
}
int CX = NX + 2*NV;
int CY = NY + 2*NV;
int CZ = NZ + 2*NV;
#pragma omp parallel for
for(int k=0; k<PNZ; k++) {
for(int j=0; j<PNY; j++) {
for(int i=0; i<PNX; i++) {
int i0 = NV + i;
int j0 = NV + j;
int k0 = NV + k;
int m0 = i0 + CX*(j0 + CY*k0);
int m1 = i0-1 + CX*(j0 + CY*k0);
int m2 = i0-1 + CX*((j0-1) + CY*k0);
int m3 = i0 + CX*((j0-1) + CY*k0);
int m4 = i0 + CX*(j0 + CY*(k0-1));
int m5 = i0-1 + CX*(j0 + CY*(k0-1));
int m6 = i0-1 + CX*((j0-1) + CY*(k0-1));
int m7 = i0 + CX*((j0-1) + CY*(k0-1));
T phi0 = pData[m0];
T phi1 = pData[m1];
T phi2 = pData[m2];
T phi3 = pData[m3];
T phi4 = pData[m4];
T phi5 = pData[m5];
T phi6 = pData[m6];
T phi7 = pData[m7];
int mp = i + PNX*(j + PNY*k);
pPointData[mp] = 0.125*(phi0 + phi1 + phi2 + phi3 + phi4 + phi5 + phi6 + phi7);
}
}
}
}
void ClearTriangles() {
vVertexList.clear();
}
void DetectTriangles(
double threshold,
double ox, double oy, double oz,
double dx)
{
#pragma omp parallel for
for(int k=0; k<PNZ-1; k++) {
for(int j=0; j<PNY-1; j++) {
for(int i=0; i<PNX-1; i++) {
Vertex v[8];
v[3].x = ox + dx*i;
v[3].y = oy + dx*j;
v[3].z = oz + dx*k;
v[2].x = ox + dx*(i+1);
v[2].y = oy + dx*j;
v[2].z = oz + dx*k;
v[1].x = ox + dx*(i+1);
v[1].y = oy + dx*(j+1);
v[1].z = oz + dx*k;
v[0].x = ox + dx*i;
v[0].y = oy + dx*(j+1);
v[0].z = oz + dx*k;
v[7].x = ox + dx*i;
v[7].y = oy + dx*j;
v[7].z = oz + dx*(k+1);
v[6].x = ox + dx*(i+1);
v[6].y = oy + dx*j;
v[6].z = oz + dx*(k+1);
v[5].x = ox + dx*(i+1);
v[5].y = oy + dx*(j+1);
v[5].z = oz + dx*(k+1);
v[4].x = ox + dx*i;
v[4].y = oy + dx*(j+1);
v[4].z = oz + dx*(k+1);
double p[8];
p[3] = pPointData[i + PNX*(j + PNY*k)];
p[2] = pPointData[i+1 + PNX*(j + PNY*k)];
p[1] = pPointData[i+1 + PNX*(j+1 + PNY*k)];
p[0] = pPointData[i + PNX*(j+1 + PNY*k)];
p[7] = pPointData[i + PNX*(j + PNY*(k+1))];
p[6] = pPointData[i+1 + PNX*(j + PNY*(k+1))];
p[5] = pPointData[i+1 + PNX*(j+1 + PNY*(k+1))];
p[4] = pPointData[i + PNX*(j+1 + PNY*(k+1))];
int cubeindex = 0;
if( p[0] < threshold ) {
cubeindex |= 1;
}
if( p[1] < threshold ) {
cubeindex |= 2;
}
if( p[2] < threshold ) {
cubeindex |= 4;
}
if( p[3] < threshold ) {
cubeindex |= 8;
}
if( p[4] < threshold ) {
cubeindex |= 16;
}
if( p[5] < threshold ) {
cubeindex |= 32;
}
if( p[6] < threshold ) {
cubeindex |= 64;
}
if( p[7] < threshold ) {
cubeindex |= 128;
}
Vertex u[12];
if( edgeTable[cubeindex] & 1 ) {
u[0] = GetIntersection(v[0], v[1], p[0], p[1], threshold);
}
if( edgeTable[cubeindex] & 2 ) {
u[1] = GetIntersection(v[1], v[2], p[1], p[2], threshold);
}
if( edgeTable[cubeindex] & 4 ) {
u[2] = GetIntersection(v[2], v[3], p[2], p[3], threshold);
}
if( edgeTable[cubeindex] & 8 ) {
u[3] = GetIntersection(v[3], v[0], p[3], p[0], threshold);
}
if( edgeTable[cubeindex] & 16 ) {
u[4] = GetIntersection(v[4], v[5], p[4], p[5], threshold);
}
if( edgeTable[cubeindex] & 32 ) {
u[5] = GetIntersection(v[5], v[6], p[5], p[6], threshold);
}
if( edgeTable[cubeindex] & 64 ) {
u[6] = GetIntersection(v[6], v[7], p[6], p[7], threshold);
}
if( edgeTable[cubeindex] & 128 ) {
u[7] = GetIntersection(v[7], v[4], p[7], p[4], threshold);
}
if( edgeTable[cubeindex] & 256 ) {
u[8] = GetIntersection(v[0], v[4], p[0], p[4], threshold);
}
if( edgeTable[cubeindex] & 512 ) {
u[9] = GetIntersection(v[1], v[5], p[1], p[5], threshold);
}
if( edgeTable[cubeindex] & 1024 ) {
u[10] = GetIntersection(v[2], v[6], p[2], p[6], threshold);
}
if( edgeTable[cubeindex] & 2048 ) {
u[11] = GetIntersection(v[3], v[7], p[3], p[7], threshold);
}
for(int n=0; n<12; n++) {
u[n].value = threshold;
}
#pragma omp critical
{
int nTriangle = 0;
for(int i=0; triTable[cubeindex][i] != -1; i+=3) {
vVertexList.push_back( u[triTable[cubeindex][i]] );
vVertexList.push_back( u[triTable[cubeindex][i+1]] );
vVertexList.push_back( u[triTable[cubeindex][i+2]] );
nTriangle++;
}
}
// std::cout << nTriangle << std::endl;
}
}
}
}
void PrintVTP(
const std::string path,
const std::string prefix,
const std::string name,
int step, int rank, int block,
double threshold)
{
std::ostringstream ossFileName2;
ossFileName2 << path;
ossFileName2 << "/";
ossFileName2.width(10);
ossFileName2.setf(std::ios::fixed);
ossFileName2.fill('0');
ossFileName2 << step;
ossFileName2 << "/";
ossFileName2 << prefix;
ossFileName2 << name.c_str();
ossFileName2 << "-";
ossFileName2.width(5);
ossFileName2.setf(std::ios::fixed);
ossFileName2.fill('0');
ossFileName2 << rank;
ossFileName2 << "-";
ossFileName2.width(5);
ossFileName2.setf(std::ios::fixed);
ossFileName2.fill('0');
ossFileName2 << block;
ossFileName2 << "-";
ossFileName2.width(10);
ossFileName2.setf(std::ios::fixed);
ossFileName2.fill('0');
ossFileName2 << step;
ossFileName2 << ".vtp";
int nPoints = vVertexList.size();
int nPolys = vVertexList.size()/3;
int nLines = 0;
std::ofstream ofs;
ofs.open(ossFileName2.str().c_str(), std::ios::out);
ofs << "<VTKFile type=\"PolyData\" version=\"0.1\" byte_order=\"";
#ifdef __FUJITSU
ofs << "BigEndian";
#else
ofs << "LittleEndian";
#endif
ofs << "\">" << std::endl;
ofs << "<PolyData>" << std::endl;
ofs << "<Piece NumberOfPoints=\"";
ofs << nPoints;
ofs << "\" NumberOfVerts=\"0\" NumberOfLines=\"";
ofs << nLines;
ofs << "\" NumberOfStrips=\"0\" NumberOfPolys=\"";
ofs << nPolys;
ofs << "\">" << std::endl;
ofs << "<Points>" << std::endl;
ofs << "<DataArray type=\"Float32\" Name=\"Points\" NumberOfComponents=\"3\" format=\"ascii\">" << std::endl;
for(int i=0; i<vVertexList.size(); i++) {
ofs << vVertexList[i].x << " " << vVertexList[i].y << " " << vVertexList[i].z << std::endl;
}
ofs << "</DataArray>" << std::endl;
ofs << "</Points>" << std::endl;
ofs << "<Polys>" << std::endl;
ofs << "<DataArray type=\"Int32\" Name=\"connectivity\" format=\"ascii\">" << std::endl;
for(int i=0; i<vVertexList.size()/3; i++) {
ofs << 3*i << " " << 3*i+1 << " " << 3*i+2 << std::endl;
}
ofs << "</DataArray>" << std::endl;
ofs << "<DataArray type=\"Int32\" Name=\"offsets\" format=\"ascii\">" << std::endl;
for(int i=0; i<vVertexList.size()/3; i++) {
ofs << 3*(i+1) << std::endl;
}
ofs << "</DataArray>" << std::endl;
ofs << "</Polys>" << std::endl;
ofs << "<Lines>" << std::endl;
ofs << "</Lines>" << std::endl;
ofs << "<Verts>" << std::endl;
ofs << "</Verts>" << std::endl;
ofs << "<Strips>" << std::endl;
ofs << "</Strips>" << std::endl;
ofs << "<PointData>" << std::endl;
ofs << "<DataArray type=\"Float32\" Name=\"";
ofs << name;
ofs << "\" format=\"ascii\">" << std::endl;
for(int i=0; i<vVertexList.size(); i++) {
ofs << vVertexList[i].value << std::endl;
}
ofs << "</DataArray>" << std::endl;
ofs << "</PointData>" << std::endl;
ofs << "<CellData>" << std::endl;
ofs << "</CellData>" << std::endl;
ofs << "</Piece>" << std::endl;
ofs << "</PolyData>" << std::endl;
ofs << "</VTKFile>" << std::endl;
ofs.close();
}
private:
private:
int PNX;
int PNY;
int PNZ;
float *pPointData;
std::vector<Vertex> vVertexList;
};
#endif
|
GB_unop__cosh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cosh_fp64_fp64)
// op(A') function: GB (_unop_tran__cosh_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = cosh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cosh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = cosh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COSH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cosh (Ax [p]) applied entrywise, in parallel.
// When Ab is non-NULL, A is bitmap and only entries with Ab [p] set are
// processed (C->b has already been copied from A->b by the caller).
GrB_Info GB (_unop_apply__cosh_fp64_fp64)
(
    double *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    if (Ab == NULL)
    {
        // full/sparse case: every entry is present
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double z = Ax [p] ;
            Cx [p] = cosh (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        int64_t p ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double z = Ax [p] ;
            Cx [p] = cosh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply cosh.
// The actual loop lives in the shared template GB_unop_transpose.c, which
// is specialized by the GB_* macros defined earlier in this file.
// Workspaces and A_slice partition A across nworkspaces/nthreads.
GrB_Info GB (_unop_tran__cosh_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
countpairs_s_mu_mocks_impl_double.c | /* This file is auto-generated from countpairs_s_mu_mocks_impl.c.src */
#ifndef DOUBLE_PREC
#define DOUBLE_PREC
#endif
// # -*- mode: c -*-
/* File: countpairs_s_mu_mocks_impl.c.src */
/*
This file is a part of the Corrfunc package
Copyright (C) 2015-- Manodeep Sinha (manodeep@gmail.com)
License: MIT LICENSE. See LICENSE file under the top-level
directory at https://github.com/manodeep/Corrfunc/
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <gsl/gsl_interp.h>
#include "countpairs_s_mu_mocks_impl_double.h"
#include "countpairs_s_mu_mocks_kernels_double.c"
#include "cellarray_mocks_double.h"
#include "gridlink_mocks_impl_double.h"
#include "defs.h"
#include "utils.h"
#include "cosmology_params.h"
#include "set_cosmo_dist.h"
#include "cpu_features.h"
#include "progressbar.h"
#include "proj_functions_double.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
// Set to EXIT_FAILURE once an interrupt signal is caught; polled by the
// pair-counting loops so they can abort cleanly.
int interrupt_status_DDsmu_mocks_double=EXIT_SUCCESS;

// Signal handler: log the signal by name and flag the computation to stop.
// NOTE(review): fprintf/strsignal are not async-signal-safe -- kept for
// behavior compatibility with the rest of Corrfunc.
void interrupt_handler_countpairs_s_mu_mocks_double(int signo)
{
    const char *signame = strsignal(signo);
    fprintf(stderr, "Received signal = `%s' (signo = %d). Aborting \n", signame, signo);
    interrupt_status_DDsmu_mocks_double = EXIT_FAILURE;
}
// Validate and, where possible, repair mock-catalog coordinates IN PLACE.
// Expected conventions: RA (phi) in [0, 360], DEC (theta) in [-90, 90],
// and cz (velocity) rather than z. Repairs applied to ALL elements when
// any element is out of range:
//   * RA found in [-180, 180]  -> every RA shifted by +180
//   * DEC found in (90, 180]   -> every DEC shifted by -90
//   * max(cz) < 10             -> treated as z; every value multiplied by c
// Returns EXIT_SUCCESS, or EXIT_FAILURE for NULL arrays or DEC > 180.
int check_ra_dec_cz_s_mu_double(const int64_t N, double *phi, double *theta, double *cz)
{
if(N==0) {
return EXIT_SUCCESS;
}
if(phi == NULL || theta == NULL || cz == NULL) {
fprintf(stderr,"Input arrays can not be NULL. Have RA = %p DEC = %p cz = %p\n", phi, theta, cz);
return EXIT_FAILURE;
}
int fix_cz = 0;
int fix_ra = 0;
int fix_dec = 0;
const double max_cz_threshold = 10.0;//if I find that max cz is smaller than this threshold, then I will assume z has been supplied rather than cz
double max_cz = 0.0;
//Check input cz -> ensure that cz contains cz and not z
// First pass: detect which (if any) repairs are needed.
for(int64_t i=0;i<N;i++) {
if(cz[i] > max_cz) max_cz = cz[i];
if(phi[i] < 0.0) {
fix_ra = 1;
}
if(theta[i] > 90.0) {
fix_dec = 1;
}
if(theta[i] > 180) {
fprintf(stderr,"theta[%"PRId64"] = %"REAL_FORMAT"should be less than 180 deg\n", i, theta[i]);
return EXIT_FAILURE;
}
}
if(max_cz < max_cz_threshold) fix_cz = 1;
//Only run the loop if something needs to be fixed
if(fix_cz==1 || fix_ra == 1 || fix_dec == 1) {
if(fix_ra == 1) {
fprintf(stderr,"%s> Out of range values found for ra. Expected ra to be in the range [0.0,360.0]. Found ra values in [-180,180] -- fixing that\n", __FUNCTION__);
}
if(fix_dec == 1) {
fprintf(stderr,"%s> Out of range values found for dec. Expected dec to be in the range [-90.0,90.0]. Found dec values in [0,180] -- fixing that\n", __FUNCTION__);
}
if(fix_cz == 1) {
fprintf(stderr,"%s> Out of range values found for cz. Expected input to be `cz' but found `z' instead. max_cz (found in input) = %"REAL_FORMAT" threshold "
"= %"REAL_FORMAT"\n",__FUNCTION__,max_cz,max_cz_threshold);
}
// Second pass: apply the (uniform) repairs to every element.
for(int64_t i=0;i<N;i++) {
if(fix_ra==1) {
phi[i] += (double) 180.0;
}
if(fix_dec==1) {
theta[i] -= (double) 90.0;
}
if(fix_cz == 1) {
cz[i] *= (double) SPEED_OF_LIGHT;//input was z -> convert to cz
}
}
}
return EXIT_SUCCESS;
}
/* Resolve the s-mu pair-counting kernel for the requested instruction set.
 *
 * Picks the fastest compiled-in kernel (AVX > SSE4.2 > scalar fallback)
 * that both the CPU supports at runtime (instrset_detect) and the caller
 * requested via options->instruction_set. The chosen pointer is cached in
 * a static and only recomputed when the requested instruction set changes.
 * Returns NULL if the internal dispatch table is inconsistent. */
countpairs_mocks_func_ptr_double countpairs_s_mu_mocks_driver_double(const struct config_options *options)
{
    static countpairs_mocks_func_ptr_double function = NULL;
    static isa old_isa=-1;
    if(old_isa == options->instruction_set) {
        return function;
    }

    /* Array of function pointers, fastest first; the scalar fallback is
       always compiled in and always last. */
    countpairs_mocks_func_ptr_double allfunctions[] = {
#ifdef __AVX__
        countpairs_s_mu_mocks_avx_intrinsics_double,
#endif
#ifdef __SSE4_2__
        countpairs_s_mu_mocks_sse_intrinsics_double,
#endif
        countpairs_s_mu_mocks_fallback_double
    };
    const int num_functions = sizeof(allfunctions)/sizeof(void *);
    const int fallback_offset = num_functions - 1;
#if defined(__AVX__) || defined __SSE4_2__
    const int highest_isa = instrset_detect();
#endif
    int curr_offset = 0;

    /* Now check if AVX is supported by the CPU */
    int avx_offset = fallback_offset;
#ifdef __AVX__
    avx_offset = highest_isa >= 7 ? curr_offset:fallback_offset;
    curr_offset++;
#endif

    /* Is the SSE function supported at runtime and enabled at compile-time?*/
    int sse_offset = fallback_offset;
#ifdef __SSE4_2__
    sse_offset = highest_isa >= 6 ? curr_offset:fallback_offset;
    curr_offset++;
#endif
    /* Sanity check: every compiled-in SIMD kernel must have been counted. */
    if( curr_offset != fallback_offset) {
        fprintf(stderr,"ERROR: Bug in code (current offset = %d *should equal* fallback function offset = %d)\n",
                curr_offset, fallback_offset);
        return NULL;
    }

    int function_dispatch=0;
    /* Check that cpu supports feature */
    if(options->instruction_set >= 0) {
        switch(options->instruction_set) {
        case(AVX512F):
        case(AVX2):
        case(AVX):function_dispatch=avx_offset;break;
        case(SSE42): function_dispatch=sse_offset;break;
        default:function_dispatch=fallback_offset;break;
        }
    }
    if(function_dispatch >= num_functions) {
        fprintf(stderr,"In %s> ERROR: Could not resolve the correct function.\n Function index = %d must lie between [0, %d)\n",
                __FUNCTION__, function_dispatch, num_functions);
        return NULL;
    }
    function = allfunctions[function_dispatch];
    old_isa = options->instruction_set;

    if(options->verbose){
        // This must be first (AVX/SSE may be aliased to fallback)
        if(function_dispatch == fallback_offset){
            fprintf(stderr,"Using fallback kernel\n");
        } else if(function_dispatch == avx_offset){
            fprintf(stderr,"Using AVX kernel\n");
        } else if(function_dispatch == sse_offset){
            fprintf(stderr,"Using SSE kernel\n");
        } else {
            /* fix: report to stderr like the other kernel messages
               (previously printf to stdout) */
            fprintf(stderr,"Unknown kernel!\n");
        }
    }
    return function;
}
int countpairs_mocks_s_mu_double(const int64_t ND1, double *ra1, double *dec1, double *czD1,
const int64_t ND2, double *ra2, double *dec2, double *czD2,
const int numthreads,
const int autocorr,
const char *sbinfile,
const double max_mu,
const int nmu_bins,
const int cosmology,
results_countpairs_mocks_s_mu *results,
struct config_options *options, struct extra_options *extra)
{
if(options->float_type != sizeof(double)) {
fprintf(stderr,"ERROR: In %s> Can only handle arrays of size=%zu. Got an array of size = %zu\n",
__FUNCTION__, sizeof(double), options->float_type);
return EXIT_FAILURE;
}
// If no extra options were passed, create dummy options
// This allows us to pass arguments like "extra->weights0" below;
// they'll just be NULLs, which is the correct behavior
struct extra_options dummy_extra;
if(extra == NULL){
weight_method_t dummy_method = NONE;
dummy_extra = get_extra_options(dummy_method);
extra = &dummy_extra;
}
int need_weightavg = extra->weight_method != NONE;
options->sort_on_z = 1;
struct timeval t0;
if(options->c_api_timer) {
gettimeofday(&t0, NULL);
}
if(options->fast_divide_and_NR_steps >= MAX_FAST_DIVIDE_NR_STEPS) {
fprintf(stderr, ANSI_COLOR_MAGENTA"Warning: The number of requested Newton-Raphson steps = %u is larger than max. allowed steps = %u."
" Switching to a standard divide"ANSI_COLOR_RESET"\n",
options->fast_divide_and_NR_steps, MAX_FAST_DIVIDE_NR_STEPS);
options->fast_divide_and_NR_steps = 0;
}
//Check inputs
if(ND1 == 0 || (autocorr == 0 && ND2 == 0)) {
return EXIT_SUCCESS;
}
//Check inputs
int status1 = check_ra_dec_cz_s_mu_double(ND1, ra1, dec1, czD1);
if(status1 != EXIT_SUCCESS) {
return status1;
}
if(autocorr==0) {
int status2 = check_ra_dec_cz_s_mu_double(ND2, ra2, dec2, czD2);
if(status2 != EXIT_SUCCESS) {
return status2;
}
}
#if defined(_OPENMP)
omp_set_num_threads(numthreads);
#else
(void) numthreads;
#endif
if(options->max_cells_per_dim == 0) {
fprintf(stderr,"Warning: Max. cells per dimension is set to 0 - resetting to `NLATMAX' = %d\n", NLATMAX);
options->max_cells_per_dim = NLATMAX;
}
for(int i=0;i<3;i++) {
if(options->bin_refine_factors[i] < 1) {
fprintf(stderr,"Warning: bin refine factor along axis = %d *must* be >=1. Instead found bin refine factor =%d\n",
i, options->bin_refine_factors[i]);
reset_bin_refine_factors(options);
break;/* all factors have been reset -> no point continuing with the loop */
}
}
/* setup interrupt handler -> mostly useful during the python execution.
Let's Ctrl-C abort the extension */
SETUP_INTERRUPT_HANDLERS(interrupt_handler_countpairs_s_mu_mocks_double);
//Try to initialize cosmology - code will exit if comoslogy is not implemented.
//Putting in a different scope so I can call the variable status
{
int status = init_cosmology(cosmology);
if(status != EXIT_SUCCESS) {
return status;
}
}
/***********************
*initializing the bins
************************/
double *supp;
int nsbin;
double smin,smax;
setup_bins(sbinfile,&smin,&smax,&nsbin,&supp);
if( ! (smin > 0.0 && smax > 0.0 && smin < smax && nsbin > 0)) {
fprintf(stderr,"Error: Could not setup with S bins correctly. (smin = %lf, smax = %lf, with nbins = %d). Expected non-zero smin/smax with smax > smin and nbins >=1 \n",
smin, smax, nsbin);
return EXIT_FAILURE;
}
if(max_mu <= 0.0 || max_mu > 1.0) {
fprintf(stderr,"Error: max_mu (max. value for the cosine of the angle with line of sight) must be greater than 0 and at most 1).\n"
"The passed value is max_mu = %lf. Please change it to be > 0 and <= 1.0\n", max_mu);
return EXIT_FAILURE;
}
if(nmu_bins < 1 ) {
fprintf(stderr,"Error: Number of mu bins = %d must be at least 1\n", nmu_bins);
return EXIT_FAILURE;
}
//Change cz into co-moving distance
double *D1 = NULL, *D2 = NULL;
if(options->is_comoving_dist == 0) {
D1 = my_malloc(sizeof(*D1),ND1);
D2 = autocorr == 0 ? my_malloc(sizeof(*D2),ND2):D1;
} else {
D1 = czD1;
D2 = autocorr == 0 ? czD2:czD1;
}
if(D1 == NULL || D2 == NULL) {
free(D1);free(D2);
return EXIT_FAILURE;
}
if(options->is_comoving_dist == 0) {
//Setup variables to do the cz->comoving distance
double czmax = 0.0;
const double inv_speed_of_light = 1.0/SPEED_OF_LIGHT;
get_max_double(ND1, czD1, &czmax);
if(autocorr == 0) {
get_max_double(ND2, czD2, &czmax);
}
const double zmax = czmax * inv_speed_of_light + 0.01;
const int workspace_size = 10000;
double *interp_redshift = my_calloc(sizeof(*interp_redshift), workspace_size);//the interpolation is done in 'z' and not in 'cz'
double *interp_comoving_dist = my_calloc(sizeof(*interp_comoving_dist),workspace_size);
int Nzdc = set_cosmo_dist(zmax, workspace_size, interp_redshift, interp_comoving_dist, cosmology);
if(Nzdc < 0) {
free(interp_redshift);free(interp_comoving_dist);
return EXIT_FAILURE;
}
gsl_interp *interpolation;
gsl_interp_accel *accelerator;
accelerator = gsl_interp_accel_alloc();
interpolation = gsl_interp_alloc (gsl_interp_linear,Nzdc);
gsl_interp_init(interpolation, interp_redshift, interp_comoving_dist, Nzdc);
for(int64_t i=0;i<ND1;i++) {
D1[i] = gsl_interp_eval(interpolation, interp_redshift, interp_comoving_dist, czD1[i]*inv_speed_of_light, accelerator);
}
if(autocorr==0) {
for(int64_t i=0;i<ND2;i++) {
D2[i] = gsl_interp_eval(interpolation, interp_redshift, interp_comoving_dist, czD2[i]*inv_speed_of_light, accelerator);
}
}
free(interp_redshift);free(interp_comoving_dist);
gsl_interp_free(interpolation);
gsl_interp_accel_free(accelerator);
}
double *X1 = my_malloc(sizeof(*X1), ND1);
double *Y1 = my_malloc(sizeof(*Y1), ND1);
double *Z1 = my_malloc(sizeof(*Z1), ND1);
if(X1 == NULL || Y1 == NULL || Z1 == NULL) {
free(X1);free(Y1);free(Z1);
return EXIT_FAILURE;
}
for(int64_t i=0;i<ND1;i++) {
X1[i] = D1[i]*COSD(dec1[i])*COSD(ra1[i]);
Y1[i] = D1[i]*COSD(dec1[i])*SIND(ra1[i]);
Z1[i] = D1[i]*SIND(dec1[i]);
}
double *X2,*Y2,*Z2;
if(autocorr==0) {
X2 = my_malloc(sizeof(*X2), ND2);
Y2 = my_malloc(sizeof(*Y2), ND2);
Z2 = my_malloc(sizeof(*Z2), ND2);
for(int64_t i=0;i<ND2;i++) {
X2[i] = D2[i]*COSD(dec2[i])*COSD(ra2[i]);
Y2[i] = D2[i]*COSD(dec2[i])*SIND(ra2[i]);
Z2[i] = D2[i]*SIND(dec2[i]);
}
} else {
X2 = X1;
Y2 = Y1;
Z2 = Z1;
}
double supp_sqr[nsbin];
for(int i=0; i < nsbin;i++) {
supp_sqr[i] = supp[i]*supp[i];
}
const double mu_max = (double) max_mu;
double xmin=1e10,ymin=1e10,zmin=1e10;
double xmax=-1e10,ymax=-1e10,zmax=-1e10;
get_max_min_data_double(ND1, X1, Y1, Z1, &xmin, &ymin, &zmin, &xmax, &ymax, &zmax);
if(autocorr==0) {
get_max_min_data_double(ND2, X2, Y2, Z2, &xmin, &ymin, &zmin, &xmax, &ymax, &zmax);
}
const double xdiff = xmax-xmin;
const double ydiff = ymax-ymin;
const double zdiff = zmax-zmin;
if(get_bin_refine_scheme(options) == BINNING_DFL) {
if(smax < 0.05*xdiff) {
options->bin_refine_factors[0] = 1;
}
if(smax < 0.05*ydiff) {
options->bin_refine_factors[1] = 1;
}
if(smax < 0.05*zdiff) {
options->bin_refine_factors[2] = 1;
}
}
/*---Create 3-D lattice--------------------------------------*/
int nmesh_x=0,nmesh_y=0,nmesh_z=0;
cellarray_mocks_index_particles_double *lattice1 = gridlink_mocks_index_particles_double(ND1, X1, Y1, Z1, D1, &(extra->weights0),
xmin, xmax, ymin, ymax, zmin, zmax,
smax, smax, smax,
options->bin_refine_factors[0],
options->bin_refine_factors[1],
options->bin_refine_factors[2],
&nmesh_x, &nmesh_y, &nmesh_z,
options);
if(lattice1 == NULL) {
return EXIT_FAILURE;
}
/* If there too few cells (BOOST_CELL_THRESH is ~10), and the number of cells can be increased, then boost bin refine factor by ~1*/
const double avg_np = ((double)ND1)/(nmesh_x*nmesh_y*nmesh_z);
const int8_t max_nmesh = fmax(nmesh_x, fmax(nmesh_y, nmesh_z));
if((max_nmesh <= BOOST_CELL_THRESH || avg_np >= BOOST_NUMPART_THRESH)
&& max_nmesh < options->max_cells_per_dim) {
fprintf(stderr,"%s> gridlink seems inefficient. nmesh = (%d, %d, %d); avg_np = %.3g. ", __FUNCTION__, nmesh_x, nmesh_y, nmesh_z, avg_np);
if(get_bin_refine_scheme(options) == BINNING_DFL) {
fprintf(stderr,"Boosting bin refine factor - should lead to better performance\n");
// Only boost the first two dimensions. Prevents excessive refinement.
for(int i=0;i<2;i++) {
options->bin_refine_factors[i] += BOOST_BIN_REF;
}
free_cellarray_mocks_index_particles_double(lattice1, nmesh_x * (int64_t) nmesh_y * nmesh_z);
lattice1 = gridlink_mocks_index_particles_double(ND1, X1, Y1, Z1, D1, &(extra->weights0),
xmin, xmax, ymin, ymax, zmin, zmax,
smax, smax, smax,
options->bin_refine_factors[0],
options->bin_refine_factors[1],
options->bin_refine_factors[2],
&nmesh_x, &nmesh_y, &nmesh_z,
options);
if(lattice1 == NULL) {
return EXIT_FAILURE;
}
} else {
fprintf(stderr,"Boosting bin refine factor could have helped. However, since custom bin refine factors "
"= (%d, %d, %d) are being used - continuing with inefficient mesh\n", options->bin_refine_factors[0],
options->bin_refine_factors[1], options->bin_refine_factors[2]);
}
}
cellarray_mocks_index_particles_double *lattice2 = NULL;
if(autocorr==0) {
int ngrid2_x=0,ngrid2_y=0,ngrid2_z=0;
lattice2 = gridlink_mocks_index_particles_double(ND2, X2, Y2, Z2, D2, &(extra->weights1),
xmin, xmax,
ymin, ymax,
zmin, zmax,
smax, smax, smax,
options->bin_refine_factors[0],
options->bin_refine_factors[1],
options->bin_refine_factors[2],
&ngrid2_x, &ngrid2_y, &ngrid2_z, options);
if(lattice2 == NULL) {
return EXIT_FAILURE;
}
if( ! (nmesh_x == ngrid2_x && nmesh_y == ngrid2_y && nmesh_z == ngrid2_z) ) {
fprintf(stderr,"Error: The two sets of 3-D lattices do not have identical bins. First has dims (%d, %d, %d) while second has (%d, %d, %d)\n",
nmesh_x, nmesh_y, nmesh_z, ngrid2_x, ngrid2_y, ngrid2_z);
return EXIT_FAILURE;
}
} else {
lattice2 = lattice1;
}
free(X1);free(Y1);free(Z1);
if(autocorr == 0) {
free(X2);free(Y2);free(Z2);
}
if(options->is_comoving_dist == 0) {
free(D1);
if(autocorr == 0) {
free(D2);
}
}
const int64_t totncells = (int64_t) nmesh_x * (int64_t) nmesh_y * (int64_t) nmesh_z;
{
int status = assign_ngb_cells_mocks_index_particles_double(lattice1, lattice2, totncells,
options->bin_refine_factors[0], options->bin_refine_factors[1], options->bin_refine_factors[2],
nmesh_x, nmesh_y, nmesh_z,
autocorr);
if(status != EXIT_SUCCESS) {
free_cellarray_mocks_index_particles_double(lattice1, totncells);
if(autocorr == 0) {
free_cellarray_mocks_index_particles_double(lattice2, totncells);
}
free(supp);
return EXIT_FAILURE;
}
}
/*---Gridlink-variables----------------*/
const int totnbins = (nmu_bins+1)*(nsbin+1);
const int nprojbins = nsbin-1;
#if defined(_OPENMP)
uint64_t **all_npairs = (uint64_t **) matrix_calloc(sizeof(uint64_t), numthreads, totnbins);
double **all_savg = NULL;
if(options->need_avg_sep){
all_savg = (double **) matrix_calloc(sizeof(double),numthreads,totnbins);
}
double **all_weightavg = NULL;
if(need_weightavg) {
all_weightavg = (double **) matrix_calloc(sizeof(double),numthreads,totnbins);
}
double **all_projpairs = (double **) matrix_calloc(sizeof(double),numthreads,nprojbins);
double **all_projpairs_tensor = (double **) matrix_calloc(sizeof(double),numthreads,nprojbins*nprojbins);
#else //USE_OMP
uint64_t npairs[totnbins];
double savg[totnbins], weightavg[totnbins], projpairs[nprojbins];
double projpairs_tensor[nprojbins*nprojbins];
for(int i=0; i <totnbins;i++) {
npairs[i] = 0;
if(options->need_avg_sep) {
savg[i] = ZERO;
}
if(need_weightavg) {
weightavg[i] = ZERO;
}
}
for(int i=0;i<nprojbins;i++) {
projpairs[i] = ZERO;
for(int j=0;j<nprojbins;j++) {
projpairs_tensor[i*nprojbins+j] = ZERO;
}
}
#endif //USE_OMP
/* runtime dispatch - get the function pointer */
countpairs_mocks_func_ptr_double countpairs_s_mu_mocks_function_double = countpairs_s_mu_mocks_driver_double(options);
if(countpairs_s_mu_mocks_function_double == NULL) {
return EXIT_FAILURE;
}
int interrupted=0,numdone=0, abort_status=EXIT_SUCCESS;
if(options->verbose) {
init_my_progressbar(totncells,&interrupted);
}
#if defined(_OPENMP)
#pragma omp parallel shared(numdone, abort_status, interrupt_status_DDsmu_mocks_double)
{
const int tid = omp_get_thread_num();
uint64_t npairs[totnbins];
double savg[totnbins], weightavg[totnbins], projpairs[nprojbins];
double projpairs_tensor[nprojbins*nprojbins];
for(int i=0;i<totnbins;i++) {
npairs[i] = 0;
if(options->need_avg_sep) {
savg[i] = ZERO;
}
if(need_weightavg) {
weightavg[i] = ZERO;
}
}
for(int i=0;i<nprojbins;i++) {
projpairs[i] = ZERO;
for(int j=0;j<nprojbins;j++) {
projpairs_tensor[i*nprojbins+j] = ZERO;
}
}
#pragma omp for schedule(dynamic)
#endif//USE_OMP
/*---Loop-over-Data1-particles--------------------*/
for(int64_t index1=0;index1<totncells;index1++) {
#if defined(_OPENMP)
#pragma omp flush (abort_status, interrupt_status_DDsmu_mocks_double)
#endif
if(abort_status == EXIT_SUCCESS && interrupt_status_DDsmu_mocks_double == EXIT_SUCCESS) {
//omp cancel was introduced in omp 4.0 - so this is my way of checking if loop needs to be cancelled
/* If the verbose option is not enabled, avoid outputting anything unnecessary*/
if(options->verbose) {
#if defined(_OPENMP)
if (omp_get_thread_num() == 0)
#endif
my_progressbar(numdone,&interrupted);
#if defined(_OPENMP)
#pragma omp atomic
#endif
numdone++;
}
const cellarray_mocks_index_particles_double *first = &(lattice1[index1]);
if(first->nelements == 0) {
continue;
}
double *x1 = first->x;
double *y1 = first->y;
double *z1 = first->z;
double *d1 = first->cz;
const weight_struct_double *weights1 = &(first->weights);
const int64_t N1 = first->nelements;
if(autocorr == 1) {
int same_cell = 1;
double *this_savg = options->need_avg_sep ? &(savg[0]):NULL;
double *this_weightavg = need_weightavg ? weightavg:NULL;
const int status = countpairs_s_mu_mocks_function_double(N1, x1, y1, z1, d1, weights1,
N1, x1, y1, z1, d1, weights1,
same_cell,
options->fast_divide_and_NR_steps,
smax, smin, nsbin,
nmu_bins, supp_sqr, mu_max,
this_savg, npairs, projpairs,
projpairs_tensor,
this_weightavg, extra->weight_method);
/* This actually causes a race condition under OpenMP - but mostly
I care that an error occurred - rather than the exact value of
the error status */
abort_status |= status;
}
for(int64_t ngb=0;ngb<first->num_ngb;ngb++){
const cellarray_mocks_index_particles_double *second = first->ngb_cells[ngb];
if(second->nelements == 0) {
continue;
}
const int same_cell = 0;
double *x2 = second->x;
double *y2 = second->y;
double *z2 = second->z;
double *d2 = second->cz;
const weight_struct_double *weights2 = &(second->weights);
const int64_t N2 = second->nelements;
double *this_savg = options->need_avg_sep ? &(savg[0]):NULL;
double *this_weightavg = need_weightavg ? weightavg:NULL;
const int status = countpairs_s_mu_mocks_function_double(N1, x1, y1, z1, d1, weights1,
N2, x2, y2, z2, d2, weights2,
same_cell,
options->fast_divide_and_NR_steps,
smax, smin, nsbin,
nmu_bins, supp_sqr, mu_max,
this_savg, npairs, projpairs,
projpairs_tensor,
this_weightavg, extra->weight_method);
/* This actually causes a race condition under OpenMP - but mostly
I care that an error occurred - rather than the exact value of
the error status */
abort_status |= status;
}//loop over ngb cells
}//abort_status check
}//i loop over ND1 particles
#if defined(_OPENMP)
for(int i=0;i<totnbins;i++) {
all_npairs[tid][i] = npairs[i];
if(options->need_avg_sep) {
all_savg[tid][i] = savg[i];
}
if(need_weightavg) {
all_weightavg[tid][i] = weightavg[i];
}
}
for (int i=0;i<nprojbins;i++) {
all_projpairs[tid][i] = projpairs[i];
for(int j=0;j<nprojbins;j++) {
all_projpairs_tensor[tid][i*nprojbins+j] = projpairs_tensor[i*nprojbins+j];
}
}
}//close the omp parallel region
#endif//USE_OMP
free_cellarray_mocks_index_particles_double(lattice1,totncells);
if(autocorr == 0) {
free_cellarray_mocks_index_particles_double(lattice2,totncells);
}
if(abort_status != EXIT_SUCCESS || interrupt_status_DDsmu_mocks_double != EXIT_SUCCESS) {
/* Cleanup memory here if aborting */
free(supp);
#if defined(_OPENMP)
matrix_free((void **) all_npairs, numthreads);
if(options->need_avg_sep) {
matrix_free((void **) all_savg, numthreads);
}
if(need_weightavg) {
matrix_free((void **) all_weightavg, numthreads);
}
matrix_free((void **) all_projpairs, numthreads);
matrix_free((void **) all_projpairs_tensor, numthreads);
#endif
return EXIT_FAILURE;
}
if(options->verbose) {
finish_myprogressbar(&interrupted);
}
#if defined(_OPENMP)
uint64_t npairs[totnbins];
double savg[totnbins], weightavg[totnbins], projpairs[nprojbins];
double projpairs_tensor[nprojbins*nprojbins];
for(int i=0;i<totnbins;i++) {
npairs[i] = 0;
if(options->need_avg_sep) {
savg[i] = ZERO;
}
if(need_weightavg) {
weightavg[i] = ZERO;
}
}
for(int i=0;i<nprojbins;i++) {
projpairs[i] = ZERO;
for(int j=0;j<nprojbins;j++) {
projpairs_tensor[i*nprojbins+j] = ZERO;
}
}
for(int i=0;i<numthreads;i++) {
for(int j=0;j<totnbins;j++) {
npairs[j] += all_npairs[i][j];
if(options->need_avg_sep) {
savg[j] += all_savg[i][j];
}
if(need_weightavg) {
weightavg[j] += all_weightavg[i][j];
}
}
for(int j=0;j<nprojbins;j++) {
projpairs[j] += all_projpairs[i][j];
for(int k=0;k<nprojbins;k++) {
projpairs_tensor[j*nprojbins+k] += all_projpairs_tensor[i][j*nprojbins+k];
}
}
}
matrix_free((void **) all_npairs, numthreads);
if(options->need_avg_sep) {
matrix_free((void **) all_savg, numthreads);
}
if(need_weightavg) {
matrix_free((void **) all_weightavg, numthreads);
}
matrix_free((void **) all_projpairs, numthreads);
matrix_free((void **) all_projpairs_tensor, numthreads);
#endif //USE_OMP
//The code does not double count for autocorrelations
//which means the npairs and savg values need to be doubled;
if(autocorr == 1) {
const uint64_t int_fac = 2;
const double dbl_fac = (double) 2.0;
for(int i=0;i<totnbins;i++) {
npairs[i] *= int_fac;
if(options->need_avg_sep) {
savg[i] *= dbl_fac;
}
if(need_weightavg) {
weightavg[i] *= dbl_fac;
}
}
//TODO: do i also want to double this? think so
for(int i=0;i<nprojbins;i++) {
projpairs[i] *= dbl_fac;
for(int j=0;j<nprojbins;j++) {
projpairs_tensor[i*nprojbins+j] *= dbl_fac;
}
}
}
for(int i=0;i<totnbins;i++) {
if(npairs[i] > 0) {
if(options->need_avg_sep) {
savg[i] /= (double) npairs[i] ;
}
if(need_weightavg) {
weightavg[i] /= (double) npairs[i];
}
}
}
// don't need proj_pairs here, not averaging
results->nsbin = nsbin;
results->nmu_bins = nmu_bins;
results->mu_max = max_mu;//NOTE max_mu which is double and not mu_max (which might be float)
results->mu_min = ZERO;
results->npairs = my_malloc(sizeof(*(results->npairs)), totnbins);
results->projpairs = my_malloc(sizeof(*(results->npairs)), nprojbins);
results->projpairs_tensor = my_malloc(sizeof(*(results->npairs)), nprojbins*nprojbins);
results->supp = my_malloc(sizeof(*(results->supp)) , nsbin);
results->savg = my_malloc(sizeof(*(results->savg)) , totnbins);
results->weightavg = my_calloc(sizeof(double) , totnbins);
if(results->npairs == NULL || results->supp == NULL || results->savg == NULL || results->weightavg == NULL || results->projpairs == NULL) {
free_results_mocks_s_mu(results);
free(supp);
return EXIT_FAILURE;
}
for(int i=0;i<nsbin;i++) {
results->supp[i] = supp[i];
for(int j=0;j<nmu_bins;j++) {
const int index = i*(nmu_bins+1) + j;
if( index >= totnbins ) {
fprintf(stderr, "ERROR: In %s> index = %d must be in range [0, %d)\n", __FUNCTION__, index, totnbins);
free_results_mocks_s_mu(results);
free(supp);
return EXIT_FAILURE;
}
results->npairs[index] = npairs[index];
results->savg[index] = ZERO;
results->weightavg[index] = ZERO;
if(options->need_avg_sep) {
results->savg[index] = savg[index];
}
if(need_weightavg) {
results->weightavg[index] = weightavg[index];
}
}
}
for(int i=0;i<nprojbins;i++) {
results->projpairs[i] = projpairs[i];
for(int j=0;j<nprojbins;j++) {
results->projpairs_tensor[i*nprojbins+j] = projpairs_tensor[i*nprojbins+j];
}
}
free(supp);
/* reset interrupt handlers to default */
RESET_INTERRUPT_HANDLERS();
reset_bin_refine_factors(options);
if(options->c_api_timer) {
struct timeval t1;
gettimeofday(&t1, NULL);
options->c_api_time = ADD_DIFF_TIME(t0, t1);
}
return EXIT_SUCCESS;
}
|
GB_unop__identity_fc32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_uint32)
// op(A') function: GB (_unop_tran__identity_fc32_uint32)
// C type: GxB_FC32_t
// A type: uint32_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fc32_uint32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries present in the bitmap are converted.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            // aij = Ax [p] ; Cx [p] = cast (aij)
            GB_CAST_OP (p, p) ;
        }
    }
    else
    {
        // dense/sparse case: convert every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // aij = Ax [p] ; Cx [p] = cast (aij)
            GB_CAST_OP (p, p) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fc32_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is shared across all generated operators; it
    // specializes itself through the GB_* macros defined above in this file
    // (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
binbased_projection.h | // KRATOS __ __ _____ ____ _ _ ___ _ _ ____
// | \/ | ____/ ___|| | | |_ _| \ | |/ ___|
// | |\/| | _| \___ \| |_| || || \| | | _
// | | | | |___ ___) | _ || || |\ | |_| |
// |_| |_|_____|____/|_| |_|___|_| \_|\____| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Antonia Larese De Tetto
//
#if !defined(KRATOS_BINBASED_PROJECTION )
#define KRATOS_BINBASED_PROJECTION
//External includes
// System includes
#include <string>
#include <iostream>
#include <stdlib.h>
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "utilities/timer.h"
#include "meshing_application.h"
//Database includes
#include "spatial_containers/spatial_containers.h"
#include "utilities/binbased_fast_point_locator.h"
#include "utilities/binbased_nodes_in_element_locator.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// This class allows the interpolation between non-matching meshes in 2D and 3D.
/** @author Antonia Larese De Tetto <antoldt@cimne.upc.edu>
*
* This class allows the interpolation of a scalar or vectorial variable between non-matching meshes
* in 2D and 3D.
*
* For every node of the destination model part it is checked in which element of the origin model part it is
* contained and a linear interpolation is performed
*
* The data structure used by default is static bin.
* In order to use this utility the construction of a bin of object @see BinBasedNodesInElementLocator
* and a bin of nodes @see BinBasedFastPointLocator
* is required at the beginning of the calculation (only ONCE).
*/
//class BinBasedMeshTransfer
template<std::size_t TDim >
class BinBasedMeshTransfer
{
public:
///@name Type Definitions
///@{
/// Pointer definition of BinBasedMeshTransfer
KRATOS_CLASS_POINTER_DEFINITION(BinBasedMeshTransfer<TDim >);
/// Node type definition
typedef Node<3> NodeType;
typedef Geometry<NodeType> GeometryType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
BinBasedMeshTransfer() = default; //
/// Destructor.
virtual ~BinBasedMeshTransfer() = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
//If you want to pass the whole model part
//**********************************************************************
//**********************************************************************
/// Interpolate the whole problem type
/**
* @param rOrigin_ModelPart: the model part all the variable should be taken from
* @param rDestination_ModelPart: the destination model part where we want to know the values of the variables
*/
void DirectInterpolation(
    ModelPart& rOrigin_ModelPart ,
    ModelPart& rDestination_ModelPart
    )
{
    KRATOS_TRY

    // Whole-model-part transfer is not available; calling this always throws.
    // Use DirectVariableInterpolation / MappingFromMovingMesh for per-variable
    // transfer instead.
    KRATOS_ERROR << "Not implemented yet" << std::endl;

    KRATOS_CATCH("")
}
//If you want to pass only one variable
//**********************************************************************
//**********************************************************************
/// Interpolate one variable from the fixed mesh to the moving one
/**
* @param rFixed_ModelPart: the model part all the variable should be taken from
* @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
* @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
* @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
* @param node_locator: precomputed bin of objects. It is to be constructed separately @see binbased_fast_point_locator.h
*/
// Form fixed to moving model part
template<class TDataType>
void DirectVariableInterpolation(
    ModelPart& rFixed_ModelPart ,
    ModelPart& rMoving_ModelPart,
    Variable<TDataType>& rFixedDomainVariable ,
    Variable<TDataType>& rMovingDomainVariable,
    BinBasedFastPointLocator<TDim>& node_locator
    )
{
    KRATOS_TRY

    KRATOS_INFO("BinBasedMeshTransfer") << "Interpolate From Fixed Mesh*************************************" << std::endl;

    // Reset the destination variable on every node of the moving mesh
    for(auto node_it = rMoving_ModelPart.NodesBegin(); node_it != rMoving_ModelPart.NodesEnd(); ++node_it) {
        ClearVariables(node_it, rMovingDomainVariable);
    }

    // Shape-function values at the located point (TDim+1 nodes per simplex)
    Vector shape_functions(TDim + 1);
    const int max_results = 10000;
    typename BinBasedFastPointLocator<TDim>::ResultContainerType search_results(max_results);
    const int num_nodes = rMoving_ModelPart.Nodes().size();

    // Each thread gets its own copy of the search buffer and shape functions
    #pragma omp parallel for firstprivate(search_results,shape_functions)
    for (int k = 0; k < num_nodes; k++) {
        auto it_node = rMoving_ModelPart.NodesBegin() + k;
        NodeType::Pointer p_node = *(it_node.base());

        // Locate the fixed-mesh element containing this moving node
        Element::Pointer p_element;
        auto result_begin = search_results.begin();
        const bool found = node_locator.FindPointOnMesh(p_node->Coordinates(), shape_functions, p_element, result_begin, max_results);

        if (found) {
            // Linear interpolation from the host element onto the node
            Interpolate( p_element, shape_functions, p_node, rFixedDomainVariable , rMovingDomainVariable );
        }
    }

    KRATOS_CATCH("")
}
/// Map one variable from the moving mesh to the fixed one -The two meshes should be of the same dimensions otherwise better to use
/// MappingFromMovingMesh_VariableMeshes that is a much generic tool.
/**
* @param rFixed_ModelPart: the model part all the variable should be taken from
* @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
* @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
* @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
* @param node_locator: precomputed bin of objects (elelments of the fixed mesh). It is to be constructed separately @see binbased_nodes_in_element_locator
*/
// From moving to fixed model part
template<class TDataType>
void MappingFromMovingMesh(
    ModelPart& rMoving_ModelPart ,
    ModelPart& rFixed_ModelPart,
    Variable<TDataType>& rMovingDomainVariable ,
    Variable<TDataType>& rFixedDomainVariable,
    BinBasedFastPointLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part
    )
{
    KRATOS_TRY

    KRATOS_INFO("BinBasedMeshTransfer") << "Transfer From Moving Mesh*************************************" << std::endl;

    // Both model parts must have the variables registered in their nodal databases
    if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false)
        KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", "");
    if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false)
        KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", "");

    // Reset the destination variable and the YOUNG_MODULUS scratch value on
    // every fixed-mesh node in a single pass (the original code made two
    // identical passes). YOUNG_MODULUS is (ab)used here as a per-node
    // accumulator for the sum of shape-function weights.
    for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
            node_it != rFixed_ModelPart.NodesEnd(); ++node_it)
    {
        ClearVariables(node_it, rFixedDomainVariable);
        (node_it)->GetValue(YOUNG_MODULUS) = 0.0;
    }

    // Shape-function values at the located point (TDim+1 nodes per simplex)
    Vector N(TDim + 1);
    const int max_results = 10000;
    typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
    const int nparticles = rMoving_ModelPart.Nodes().size();

    // Scatter phase: every moving node adds N[k]-weighted contributions of its
    // value to the nodes of the fixed-mesh element that contains it.
    #pragma omp parallel for firstprivate(results,N)
    for (int i = 0; i < nparticles; i++)
    {
        ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i;
        NodeType::Pointer pparticle = *(iparticle.base());
        auto result_begin = results.begin();
        Element::Pointer pelement;
        const bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
        if (is_found)
        {
            GeometryType& geom = pelement->GetGeometry();
            const TDataType& value = (iparticle)->FastGetSolutionStepValue(rMovingDomainVariable);
            for (std::size_t k = 0; k < geom.size(); k++)
            {
                // Node locks guard the concurrent += on shared fixed-mesh nodes
                geom[k].SetLock();
                geom[k].FastGetSolutionStepValue(rFixedDomainVariable) += N[k] * value;
                geom[k].GetValue(YOUNG_MODULUS) += N[k];
                geom[k].UnSetLock();
            }
        }
    }

    // Normalization phase: divide the accumulated value by the total weight.
    // Fixed nodes that received no contribution keep their cleared value.
    for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
            node_it != rFixed_ModelPart.NodesEnd(); ++node_it)
    {
        const double NN = (node_it)->GetValue(YOUNG_MODULUS);
        if (NN != 0.0)
        {
            (node_it)->FastGetSolutionStepValue(rFixedDomainVariable) /= NN;
        }
    }

    KRATOS_CATCH("")
}
// From moving to fixed model part
/// Interpolate one variable from the moving mesh to the fixed one
/**
* @param rFixed_ModelPart: the model part all the variable should be taken from
* @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
* @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
* @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
* @param node_locator: precomputed bin of nodes of the fixed mesh. It is to be constructed separately @see binbased_nodes_in_element_locator
*/
template<class TDataType>
void MappingFromMovingMesh_VariableMeshes(
    ModelPart& rMoving_ModelPart ,
    ModelPart& rFixed_ModelPart,
    Variable<TDataType>& rMovingDomainVariable ,
    Variable<TDataType>& rFixedDomainVariable,
    BinBasedNodesInElementLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part
    )
{
    KRATOS_TRY

    KRATOS_WATCH("Transfer From Moving Mesh*************************************")

    // Both model parts must have the variables registered in their nodal databases
    if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false)
        KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", "");
    if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false)
        KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", "");

    //creating an auxiliary list for the new nodes
    // Reset the destination variable on every fixed-mesh node
    for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
            node_it != rFixed_ModelPart.NodesEnd(); ++node_it)
    {
        ClearVariables(node_it, rFixedDomainVariable);
    }

    //defintions for spatial search
    typedef typename BinBasedNodesInElementLocator<TDim>::PointVector PointVector;
    typedef typename BinBasedNodesInElementLocator<TDim>::DistanceVector DistanceVector;

    // Work buffers reused across all elements: at most max_results fixed-mesh
    // nodes can be found inside a single moving-mesh element.
    const std::size_t max_results = 5000;
    Matrix Nmat(max_results,TDim+1);
    boost::numeric::ublas::vector<int> positions(max_results);
    PointVector work_results(max_results);
    DistanceVector work_distances(max_results);
    Node<3> work_point(0,0.0,0.0,0.0);

    // For every element of the moving mesh, find the fixed-mesh nodes that lie
    // inside it and interpolate the variable onto them.
    for(ModelPart::ElementsContainerType::iterator elem_it = rMoving_ModelPart.ElementsBegin(); elem_it != rMoving_ModelPart.ElementsEnd(); ++elem_it)
    {
        std::size_t nfound = node_locator.FindNodesInElement(*(elem_it.base()), positions, Nmat, max_results, work_results.begin(), work_distances.begin(), work_point);
        for(std::size_t k=0; k<nfound; k++)
        {
            auto it = work_results.begin() + positions[k];
            // Row k of Nmat holds the shape-function values at the k-th found node
            array_1d<double,TDim+1> N = row(Nmat,k);
            Interpolate( *(elem_it.base()), N, *it, rMovingDomainVariable , rFixedDomainVariable);
        }
    }
    KRATOS_CATCH("")
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
    // No internal state worth reporting; derived classes may override.
    return "";
}

/// Print information about this object (intentionally empty by default).
virtual void PrintInfo(std::ostream& rOStream) const {}

/// Print object's data (intentionally empty by default).
virtual void PrintData(std::ostream& rOStream) const {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member rVariables
///@{
///@}
///@name Protected member rVariables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member rVariables
///@{
///@}
///@name Member rVariables
///@{
// Compute the centroid (xc,yc,zc) of a 3-node (triangle) geometry and a
// search radius R covering the distance from the centroid to the farthest
// node. zc is fixed to 0 in this 2D overload; N is not written here, it
// only selects this overload.
inline void CalculateCenterAndSearchRadius(GeometryType&geom,
double& xc, double& yc, double& zc, double& R, array_1d<double,3>& N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
// Centroid = average of the three vertices.
xc = 0.3333333333333333333*(x0+x1+x2);
yc = 0.3333333333333333333*(y0+y1+y2);
zc = 0.0;
// Squared distances from the centroid to each vertex.
double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0);
double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1);
double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2);
R = R1;
if(R2 > R) R = R2;
if(R3 > R) R = R3;
// 1% safety margin on the enclosing radius.
// NOTE(review): the 4-node overload applies no such margin -- confirm
// whether that asymmetry is intentional.
R = 1.01 * sqrt(R);
}
//***************************************
//***************************************
// Compute the centroid (xc,yc,zc) of a 4-node (tetrahedron) geometry and a
// search radius R equal to the distance from the centroid to the farthest
// node. N is not written here; it only selects this overload.
// NOTE(review): unlike the 3-node overload, no 1.01 safety factor is
// applied to R -- confirm whether that is intentional.
inline void CalculateCenterAndSearchRadius(GeometryType&geom,
double& xc, double& yc, double& zc, double& R, array_1d<double,4>& N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
// Centroid = average of the four vertices.
xc = 0.25*(x0+x1+x2+x3);
yc = 0.25*(y0+y1+y2+y3);
zc = 0.25*(z0+z1+z2+z3);
// Squared distances from the centroid to each vertex.
double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0) + (zc-z0)*(zc-z0);
double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1) + (zc-z1)*(zc-z1);
double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2) + (zc-z2)*(zc-z2);
double R4 = (xc-x3)*(xc-x3) + (yc-y3)*(yc-y3) + (zc-z3)*(zc-z3);
R = R1;
if(R2 > R) R = R2;
if(R3 > R) R = R3;
if(R4 > R) R = R4;
R = sqrt(R);
}
//***************************************
//***************************************
// Signed area of the triangle (x0,y0),(x1,y1),(x2,y2): one half of the 2D
// cross product of the two edge vectors from vertex 0. Positive for a
// counter-clockwise vertex ordering, negative for clockwise.
inline double CalculateVol( const double x0, const double y0,
                            const double x1, const double y1,
                            const double x2, const double y2
                          )
{
    const double cross = (x1 - x0) * (y2 - y0) - (y1 - y0) * (x2 - x0);
    return 0.5 * cross;
}
//***************************************
//***************************************
// Signed volume of the tetrahedron (v0,v1,v2,v3): the scalar triple
// product of the three edge vectors from vertex 0, times 1/6. Positive
// for a positively oriented tetrahedron, zero when degenerate.
inline double CalculateVol( const double x0, const double y0, const double z0,
                            const double x1, const double y1, const double z1,
                            const double x2, const double y2, const double z2,
                            const double x3, const double y3, const double z3
                          )
{
    // Edge vectors from vertex 0.
    const double ax = x1 - x0, ay = y1 - y0, az = z1 - z0;
    const double bx = x2 - x0, by = y2 - y0, bz = z2 - z0;
    const double cx = x3 - x0, cy = y3 - y0, cz = z3 - z0;
    // a . (b x c), expanded term-by-term in the same order as before so
    // floating-point results stay bit-identical.
    const double detJ = ax * by * cz - ax * cy * bz + ay * bz * cx - ay * bx * cz + az * bx * cy - az * by * cx;
    return detJ * 0.1666666666666666666667;
}
//***************************************
//***************************************
// Compute the barycentric (area) coordinates N of point (xc,yc) with
// respect to the 3-node geometry geom. Returns true when the point lies
// inside or on the boundary of the triangle, false otherwise (including
// degenerate zero-area elements). zc is ignored in this 2D overload.
inline bool CalculatePosition( GeometryType&geom,
const double xc, const double yc, const double zc,
array_1d<double,3>& N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double area = CalculateVol(x0,y0,x1,y1,x2,y2);
double inv_area = 0.0;
if(area == 0.0)
{
// KRATOS_THROW_ERROR(std::logic_error,"element with zero area found","");
//The interpolated node will not be inside an element with zero area
return false;
}
else
{
inv_area = 1.0 / area;
}
// Barycentric coordinates: sub-triangle areas divided by the total area.
N[0] = CalculateVol(x1,y1,x2,y2,xc,yc) * inv_area;
N[1] = CalculateVol(x2,y2,x0,y0,xc,yc) * inv_area;
N[2] = CalculateVol(x0,y0,x1,y1,xc,yc) * inv_area;
if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <=1.0 && N[1]<= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
return true;
return false;
}
//***************************************
//***************************************
// Compute the barycentric coordinates N of point (xc,yc,zc) with respect
// to the 4-node geometry geom. Returns true when the point lies inside or
// on the boundary of the tetrahedron. Elements with volume below 1e-13
// (degenerate or negatively oriented) are rejected with false.
inline bool CalculatePosition( GeometryType&geom,
const double xc, const double yc, const double zc,
array_1d<double,4>& N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double vol = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3);
double inv_vol = 0.0;
// Tolerance-based check (vs. the exact ==0.0 test of the 2D overload);
// also rejects negative volumes (inverted elements).
if(vol < 0.0000000000001)
{
// KRATOS_THROW_ERROR(std::logic_error,"element with zero vol found","");
//The interpolated node will not be inside an element with zero volume
return false;
// KRATOS_WATCH("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
}
else
{
inv_vol = 1.0 / vol;
}
// Barycentric coordinates: sub-tetrahedron volumes divided by the total.
N[0] = CalculateVol(x1,y1,z1,x3,y3,z3,x2,y2,z2,xc,yc,zc) * inv_vol;
N[1] = CalculateVol(x3,y3,z3,x0,y0,z0,x2,y2,z2,xc,yc,zc) * inv_vol;
N[2] = CalculateVol(x3,y3,z3,x1,y1,z1,x0,y0,z0,xc,yc,zc) * inv_vol;
N[3] = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,xc,yc,zc) * inv_vol;
if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >=0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <=1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
// Interpolate ALL solution-step data onto a node, weighted by shape
// functions.
//   ElemIt          element whose geometry supplies the donor nodes
//   N               shape-function values, one per element node
//   step_data_size  number of doubles stored per solution step
//   pnode           destination node
// Every step in the node's history buffer is overwritten with the
// N-weighted combination of the element nodes' raw step data.
// (projecting total model part, 2D version)
void Interpolate(
Element::Pointer ElemIt,
const Vector& N,
int step_data_size,
NodeType::Pointer pnode)
{
//Geometry element of the rOrigin_ModelPart
GeometryType& geom = ElemIt->GetGeometry();
const std::size_t buffer_size = pnode->GetBufferSize();
const std::size_t vector_size = N.size();
for(std::size_t step = 0; step<buffer_size; step++) {
//getting the data of the solution step
double* step_data = (pnode)->SolutionStepData().Data(step);
double* node0_data = geom[0].SolutionStepData().Data(step);
// Initialise with the first node's contribution (overwrites old data).
for(int j= 0; j< step_data_size; j++) {
step_data[j] = N[0]*node0_data[j];
}
// Accumulate the remaining nodes' weighted contributions.
for(std::size_t k= 1; k< vector_size; k++) {
double* node1_data = geom[k].SolutionStepData().Data(step);
for(int j= 0; j< step_data_size; j++) {
step_data[j] += N[k]*node1_data[j];
}
}
}
// pnode->GetValue(IS_VISITED) = 1.0;
}
// Interpolate a 3-component vector variable onto a node: for every step in
// the node's history buffer, rDestinationVariable on pnode is set to the
// N-weighted combination of rOriginVariable at the element's nodes.
// (projecting an array_1d, 2D version)
void Interpolate(
Element::Pointer ElemIt,
const Vector& N,
NodeType::Pointer pnode,
Variable<array_1d<double,3> >& rOriginVariable,
Variable<array_1d<double,3> >& rDestinationVariable)
{
//Geometry element of the rOrigin_ModelPart
GeometryType& geom = ElemIt->GetGeometry();
const std::size_t buffer_size = pnode->GetBufferSize();
const std::size_t vector_size = N.size();
for(std::size_t step = 0; step<buffer_size; step++) {
//getting the data of the solution step
array_1d<double,3>& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step);
// Reference or no reference??? // TODO: delete (translated author note)
step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step);
// Accumulate the remaining nodes' weighted contributions.
for(std::size_t j= 1; j< vector_size; j++) {
const array_1d<double,3>& node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step);
step_data += N[j] * node_data;
}
}
// pnode->GetValue(IS_VISITED) = 1.0;
}
// Interpolate a scalar variable onto a node: for every step in the node's
// history buffer, rDestinationVariable on pnode is set to the N-weighted
// combination of rOriginVariable at the element's nodes.
// (projecting a scalar, 2D version)
void Interpolate(
Element::Pointer ElemIt,
const Vector& N,
NodeType::Pointer pnode,
Variable<double>& rOriginVariable,
Variable<double>& rDestinationVariable)
{
//Geometry element of the rOrigin_ModelPart
GeometryType& geom = ElemIt->GetGeometry();
const std::size_t buffer_size = pnode->GetBufferSize();
const std::size_t vector_size = N.size();
// (translated author note) Looping over the time steps: how does
// step_data retain the data of the previous step? I.e. where do we pass
// the information to the nodes???
for(std::size_t step = 0; step<buffer_size; step++) {
//getting the data of the solution step
double& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step);
// Reference or no reference??? // TODO: delete (translated author note)
// Initialise with the first node's contribution.
step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step);
// Accumulate the remaining nodes' weighted contributions.
for(std::size_t j= 1; j< vector_size; j++) {
const double node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step);
step_data += N[j] * node_data;
}
}
// pnode->GetValue(IS_VISITED) = 1.0;
}
// Zero the first step_data_size doubles of the node's solution-step data,
// for every step in its history buffer.
inline void Clear(ModelPart::NodesContainerType::iterator node_it, int step_data_size )
{
std::size_t buffer_size = node_it->GetBufferSize();
for(std::size_t step = 0; step<buffer_size; step++)
{
//getting the data of the solution step
double* step_data = (node_it)->SolutionStepData().Data(step);
// zero every component of this step's data
for(int j= 0; j< step_data_size; j++)
{
step_data[j] = 0.0;
}
}
}
// Reset a 3-component nodal variable (current step only) to the zero vector.
inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it , Variable<array_1d<double,3> >& rVariable)
{
array_1d<double, 3>& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0);
noalias(Aux_var) = ZeroVector(3);
}
// Reset a scalar nodal variable (current step only) to zero.
inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it, Variable<double>& rVariable)
{
double& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0);
Aux_var = 0.0;
}
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
BinBasedMeshTransfer& operator=(BinBasedMeshTransfer const& rOther);
///@}
}; // Class BinBasedMeshTransfer
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// Stream-insertion operator: writes the object's info line, a newline,
/// then its data, and returns the stream for chaining.
template<std::size_t TDim>
inline std::ostream& operator << (std::ostream& rOStream,
const BinBasedMeshTransfer<TDim>& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_BINBASED_PROJECTION defined
|
main.c | #include "../comms.h"
#include "../mesh.h"
#include "../params.h"
#include "../profiler.h"
#include "../shared_data.h"
#include "neutral_interface.h"
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef MPI
#include "mpi.h"
#endif
void plot_particle_density(NeutralData* neutral_data, Mesh* mesh, const int tt,
const int nparticles, const double elapsed_sim_time);
// Driver for the 2D neutral particle transport mini-app: reads mesh and
// solver parameters, initialises MPI/devices/mesh/shared data, then runs
// the timestep loop calling solve_transport_2d and reporting per-step
// statistics. Returns 0 on completion.
int main(int argc, char** argv) {
if (argc != 2) {
TERMINATE("usage: ./neutral.exe <param_file>\n");
}
// Store the dimensions of the mesh
Mesh mesh;
NeutralData neutral_data;
neutral_data.neutral_params_filename = argv[1];
mesh.global_nx =
get_int_parameter("nx", neutral_data.neutral_params_filename);
mesh.global_ny =
get_int_parameter("ny", neutral_data.neutral_params_filename);
// pad is 0, so local extents equal global extents in this build.
mesh.pad = 0;
mesh.local_nx = mesh.global_nx + 2 * mesh.pad;
mesh.local_ny = mesh.global_ny + 2 * mesh.pad;
mesh.width = get_double_parameter("width", ARCH_ROOT_PARAMS);
mesh.height = get_double_parameter("height", ARCH_ROOT_PARAMS);
mesh.dt = get_double_parameter("dt", neutral_data.neutral_params_filename);
mesh.sim_end = get_double_parameter("sim_end", ARCH_ROOT_PARAMS);
mesh.niters =
get_int_parameter("iterations", neutral_data.neutral_params_filename);
mesh.rank = MASTER;
mesh.nranks = 1;
mesh.ndims = 2;
const int visit_dump =
get_int_parameter("visit_dump", neutral_data.neutral_params_filename);
// Get the number of threads and initialise the random number pool
#pragma omp parallel
{ neutral_data.nthreads = omp_get_num_threads(); }
printf("Starting up with %d OpenMP threads.\n", neutral_data.nthreads);
printf("Loading problem from %s.\n", neutral_data.neutral_params_filename);
#ifdef ENABLE_PROFILING
/* The timing code has to be called so many times that the API calls
* actually begin to influence the performance dramatically. */
fprintf(stderr,
"Warning. Profiling is enabled and will increase the runtime.\n\n");
#endif
// Perform the general initialisation steps for the mesh etc
initialise_mpi(argc, argv, &mesh.rank, &mesh.nranks);
initialise_devices(mesh.rank);
initialise_comms(&mesh);
initialise_mesh_2d(&mesh);
SharedData shared_data = {0};
initialise_shared_data_2d(mesh.local_nx, mesh.local_ny, mesh.pad, mesh.width,
mesh.height, neutral_data.neutral_params_filename, mesh.edgex, mesh.edgey, &shared_data);
handle_boundary_2d(mesh.local_nx, mesh.local_ny, &mesh, shared_data.density,
NO_INVERT, PACK);
initialise_neutral_data(&neutral_data, &mesh);
// Make sure initialisation phase is complete
barrier();
// Main timestep loop where we will track each particle through time
int tt;
double wallclock = 0.0;
double elapsed_sim_time = 0.0;
// NOTE(review): profile is not explicitly zeroed here; presumably
// START_PROFILING initialises its entries -- confirm against profiler.h.
struct Profile profile;
for (tt = 1; tt <= mesh.niters; ++tt) {
if (mesh.rank == MASTER) {
printf("\nIteration %d\n", tt);
}
if (visit_dump) {
plot_particle_density(&neutral_data, &mesh, tt, neutral_data.nparticles,
elapsed_sim_time);
}
uint64_t facet_events = 0;
uint64_t collision_events = 0;
START_PROFILING(&profile);
// Begin the main solve step
solve_transport_2d(
mesh.local_nx - 2 * mesh.pad, mesh.local_ny - 2 * mesh.pad,
mesh.global_nx, mesh.global_ny, tt, mesh.pad, mesh.x_off, mesh.y_off,
mesh.dt, neutral_data.nparticles, &neutral_data.nlocal_particles,
mesh.neighbours, neutral_data.local_particles,
shared_data.density, mesh.edgex, mesh.edgey, mesh.edgedx, mesh.edgedy,
neutral_data.cs_scatter_table, neutral_data.cs_absorb_table,
neutral_data.energy_deposition_tally, neutral_data.nfacets_reduce_array,
neutral_data.ncollisions_reduce_array, neutral_data.nprocessed_reduce_array,
&facet_events, &collision_events);
barrier();
// NOTE(review): single-character label; only meaningful for tt <= 9 and
// not NUL-terminated -- confirm STOP_PROFILING treats it as a 1-char key.
const char p = '0' + tt;
STOP_PROFILING(&profile, &p);
double step_time = profile.profiler_entries[tt-1].time;
wallclock += step_time;
printf("Step time %.4fs\n", step_time);
printf("Wallclock %.4fs\n", wallclock);
// NOTE(review): %lu assumes 64-bit long for uint64_t (true on LP64);
// PRIu64 from <inttypes.h> would be portable.
printf("Facets %lu\n", facet_events);
printf("Collisions %lu\n", collision_events);
// Note that this metric is only valid in the single event case
printf("Facet Events / s %.2e\n", facet_events / step_time);
printf("Collision Events / s %.2e\n", collision_events / step_time);
elapsed_sim_time += mesh.dt;
if (visit_dump) {
char tally_name[100];
sprintf(tally_name, "energy%d", tt);
int dneighbours[NNEIGHBOURS] = {EDGE, EDGE, EDGE, EDGE, EDGE, EDGE};
write_all_ranks_to_visit(
mesh.global_nx, mesh.global_ny, mesh.local_nx - 2 * mesh.pad,
mesh.local_ny - 2 * mesh.pad, mesh.pad, mesh.x_off, mesh.y_off,
mesh.rank, mesh.nranks, dneighbours,
neutral_data.energy_deposition_tally, tally_name, 0,
elapsed_sim_time);
}
// Leave the simulation if we have reached the simulation end time
if (elapsed_sim_time >= mesh.sim_end) {
if (mesh.rank == MASTER)
printf("Reached end of simulation time\n");
break;
}
}
if (visit_dump) {
plot_particle_density(&neutral_data, &mesh, tt, neutral_data.nparticles,
elapsed_sim_time);
}
validate(mesh.local_nx - 2 * mesh.pad, mesh.local_ny - 2 * mesh.pad,
neutral_data.neutral_params_filename, mesh.rank,
neutral_data.energy_deposition_tally);
if (mesh.rank == MASTER) {
//PRINT_PROFILING_RESULTS(&p);
printf("Final Wallclock %.9fs\n", wallclock);
printf("Elapsed Simulation Time %.6fs\n", elapsed_sim_time);
}
return 0;
}
// This is a bit hacky and temporary for now.
// Accumulate a per-cell particle count histogram and write it to a VisIt
// dump named "particles<tt>". The temp buffer is owned and freed here.
void plot_particle_density(NeutralData* neutral_data, Mesh* mesh, const int tt,
                           const int nparticles,
                           const double elapsed_sim_time) {
  // calloc (not malloc): the loop below accumulates with +=, so the
  // histogram must start zeroed; the previous malloc'd buffer was read
  // uninitialised, which is undefined behaviour and produced garbage
  // densities.
  double* temp =
      (double*)calloc((size_t)mesh->local_nx * mesh->local_ny, sizeof(double));
  if (!temp) {
    TERMINATE("Could not allocate data for printing.\n");
  }
  for (int ii = 0; ii < nparticles; ++ii) {
    Particle* particle = &neutral_data->local_particles[ii];
#ifdef SoA
    // NOTE(review): particle is already &local_particles[ii]; indexing its
    // members by ii again looks suspicious -- confirm the SoA layout.
    const int cellx = particle->cellx[ii] - mesh->x_off;
    const int celly = particle->celly[ii] - mesh->y_off;
#else
    const int cellx = particle->cellx - mesh->x_off;
    const int celly = particle->celly - mesh->y_off;
#endif
    temp[celly * (mesh->local_nx - 2 * mesh->pad) + cellx] += 1.0;
  }
  // Dummy neighbours that stops any padding from happening
  int neighbours[NNEIGHBOURS] = {EDGE, EDGE, EDGE, EDGE, EDGE, EDGE};
  char particles_name[100];
  // snprintf bounds the write to the fixed-size name buffer.
  snprintf(particles_name, sizeof(particles_name), "particles%d", tt);
  write_all_ranks_to_visit(
      mesh->global_nx, mesh->global_ny, mesh->local_nx - 2 * mesh->pad,
      mesh->local_ny - 2 * mesh->pad, mesh->pad, mesh->x_off, mesh->y_off,
      mesh->rank, mesh->nranks, neighbours, temp, particles_name, 0,
      elapsed_sim_time);
  free(temp);
}
|
pre_processing_dodg.h | #pragma once
#include <future>
#include "pre_processing.h"
#include "util/stat.h"
#include "util/util.h"
// Degree-based total order on vertices: vertex u (degree du) ranks before
// vertex v (degree dv) when its degree is smaller, with the vertex id
// breaking ties. Strict: RankLT(d, d, x, x) is false.
bool RankLT(int du, int dv, int u, int v) {
    // assert(u != v);
    if (du != dv) {
        return du < dv;
    }
    return u < v;
}
// Convenience overload: convert the whole edge list (no filtering -- the
// predicate accepts every edge index) into a DODG CSR.
// Fixes the corrupted deg_lst parameter declarator ("*°_lst" was an
// HTML-entity mangling of "*&deg_lst").
template<typename T, typename OFF>
void ConvertEdgeListToDODGCSR(OFF num_edges, pair<T, T> *&edge_lst,
                              uint32_t num_vertices, uint32_t *&deg_lst, OFF *&off, int32_t *&adj_lst,
                              int max_omp_threads) {
    ConvertEdgeListToDODGCSR(num_edges, edge_lst,
                             num_vertices, deg_lst, off, adj_lst,
                             max_omp_threads, [](size_t it) {
                return true;
            });
}
// Convenience overload: convert the whole edge list (no filtering) into a
// DODG CSR while spilling the edge list to a disk-backed buffer.
// Fixes the corrupted deg_lst parameter declarator ("*°_lst" was an
// HTML-entity mangling of "*&deg_lst").
template<typename T, typename OFF>
void ConvertEdgeListToDODGCSRDiskBuffer(OFF num_edges, pair<T, T> *&edge_lst,
                                        uint32_t num_vertices, uint32_t *&deg_lst, OFF *&off, int32_t *&adj_lst,
                                        int max_omp_threads) {
    ConvertEdgeListToDODGCSRDiskBuffer(num_edges, edge_lst,
                                       num_vertices, deg_lst, off, adj_lst,
                                       max_omp_threads, [](size_t it) {
                return true;
            });
}
template<typename T, typename F, typename OFF>
void ConvertEdgeListToDODGCSRDiskBuffer(OFF num_edges, pair<T, T> *&edge_lst,
uint32_t num_vertices, uint32_t *°_lst, OFF *&off, int32_t *&adj_lst,
int max_omp_threads, F f) {
Timer convert_timer;
deg_lst = (uint32_t *) malloc(sizeof(uint32_t) * (num_vertices + 1));
auto *dodg_deg_lst = (uint32_t *) malloc(sizeof(uint32_t) * (num_vertices + 1));
off = (OFF *) malloc(sizeof(OFF) * (num_vertices + 1));
auto cur_write_off = (OFF *) malloc(sizeof(OFF) * (num_vertices + 1));
vector<OFF> histogram;
auto io_future = std::async(std::launch::async, [=]() {
auto tmp_file_fd = open("tmp_el.bin", O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
size_t size = num_edges * sizeof(int32_t) * 2;
ftruncate(tmp_file_fd, size);
auto write_buf = (pair<T, T> *) mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, tmp_file_fd, 0);
memcpy(write_buf, edge_lst, size);
return write_buf;
});
#pragma omp parallel num_threads(max_omp_threads)
{
MemSetOMP(deg_lst, 0, num_vertices + 1);
MemSetOMP(off, 0, num_vertices + 1);
#pragma omp single
log_info("[%s]: InitTime: %.9lf s", __FUNCTION__, convert_timer.elapsed());
// Histogram.
EdgeListHistogram(num_vertices, num_edges, edge_lst, deg_lst, f);
MemSetOMP(dodg_deg_lst, 0, num_vertices + 1);
#pragma omp for
for (size_t i = 0u; i < num_edges; i++) {
if (f(i)) {
auto src = edge_lst[i].first;
auto dst = edge_lst[i].second;
if (RankLT(deg_lst[src], deg_lst[dst], src, dst))
__sync_fetch_and_add(&dodg_deg_lst[src], 1);
else
__sync_fetch_and_add(&dodg_deg_lst[dst], 1);
}
}
#pragma omp single
log_info("[%s]: Histogram Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
// PrefixSum.
InclusivePrefixSumOMP(histogram, off + 1, num_vertices, [&dodg_deg_lst](uint32_t it) {
return dodg_deg_lst[it];
});
#pragma omp single
{
log_debug("%zu", off[num_vertices]);
assert(off[num_vertices] <= num_edges);
}
MemCpyOMP(cur_write_off, off, num_vertices + 1);
// Write Edge List to File, Using Page Cache.
#pragma omp single
{
log_info("Mem Usage: %s KB", FormatWithCommas(getValue()).c_str());
auto tmp = edge_lst;
edge_lst = io_future.get();
log_info("Mem Usage: %s KB", FormatWithCommas(getValue()).c_str());
size_t size = num_edges * sizeof(int32_t) * 2;
free(tmp);
madvise(edge_lst, size, MADV_SEQUENTIAL);
if (adj_lst == nullptr) {
log_info("Allocate Inside (adj_lst)...");
adj_lst = (int32_t *) malloc(sizeof(int32_t) * off[num_vertices]);
}
log_info("[%s]: PrefixSum Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
}
// Scatter.
#pragma omp for schedule(dynamic, 32*4096/8)
for (size_t i = 0; i < num_edges; i++) {
if (f(i)) {
auto src = edge_lst[i].first;
auto dst = edge_lst[i].second;
if (!RankLT(deg_lst[src], deg_lst[dst], src, dst)) {
swap(src, dst);
}
auto old_offset = __sync_fetch_and_add(&(cur_write_off[src]), 1);
adj_lst[old_offset] = dst;
}
}
}
free(dodg_deg_lst);
free(cur_write_off);
log_info("[%s]: Total Conversion Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
}
// Build rev_g as the edge-reversed graph of the DODG org_g, with each
// adjacency list sorted ascending at the end. rev_g->num_edges and (when
// null) rev_g->adj are malloc'd here; the caller owns them.
// NOTE(review): "Reverese" is a typo for "Reverse", kept because callers
// reference this name.
void RevereseDODG(graph_t *org_g, graph_t *rev_g) {
    Timer convert_timer;
    auto max_omp_threads = omp_get_max_threads();
    rev_g->n = org_g->n;
    rev_g->m = org_g->m;
    auto num_vertices = org_g->n;
    rev_g->num_edges = (uint32_t *) malloc(sizeof(uint32_t) * (num_vertices + 1));
    auto *dodg_deg_lst = (uint32_t *) malloc(sizeof(uint32_t) * (num_vertices + 1));
    auto cur_write_off = (uint32_t *) malloc(sizeof(uint32_t) * (num_vertices + 1));
    vector<uint32_t> histogram;
#pragma omp parallel num_threads(max_omp_threads)
    {
        MemSetOMP(rev_g->num_edges, 0, num_vertices + 1);
        MemSetOMP(dodg_deg_lst, 0, num_vertices + 1);
        MemSetOMP(cur_write_off, 0, num_vertices + 1);
#pragma omp single
        log_info("[%s]: InitTime: %.9lf s", __FUNCTION__, convert_timer.elapsed());
        // Histogram: in-degree of each vertex becomes its reversed out-degree.
#pragma omp for schedule(dynamic, 1000)
        for (size_t u = 0u; u < org_g->n; u++) {
            for (auto j = org_g->num_edges[u]; j < org_g->num_edges[u + 1]; j++) {
                auto v = org_g->adj[j];
                __sync_fetch_and_add(&dodg_deg_lst[v], 1);
            }
        }
#pragma omp single
        log_info("[%s]: Histogram Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
        // PrefixSum: build the reversed CSR offsets.
        InclusivePrefixSumOMP(histogram, rev_g->num_edges + 1, num_vertices, [&dodg_deg_lst](uint32_t it) {
            return dodg_deg_lst[it];
        });
        MemCpyOMP(cur_write_off, rev_g->num_edges, num_vertices + 1);
#pragma omp single
        {
            log_info("Mem Usage: %s KB", FormatWithCommas(getValue()).c_str());
            if (rev_g->adj == nullptr) {
                log_info("Allocate Inside (adj_lst)...");
                rev_g->adj = (int32_t *) malloc(sizeof(int32_t) * rev_g->num_edges[num_vertices]);
            }
            log_info("[%s]: PrefixSum Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
        }
        // Scatter: each original edge u->v becomes v->u in rev_g.
#pragma omp for schedule(dynamic, 32*4096/8)
        for (size_t u = 0u; u < org_g->n; u++) {
            for (auto j = org_g->num_edges[u]; j < org_g->num_edges[u + 1]; j++) {
                auto src = org_g->adj[j];
                auto dst = u;
                auto old_offset = __sync_fetch_and_add(&(cur_write_off[src]), 1);
                rev_g->adj[old_offset] = dst;
            }
        }
        // Parallel transform: sort each reversed adjacency list ascending.
#pragma omp for schedule(dynamic, 100)
        for (auto i = 0; i < rev_g->n; i++) {
            sort(rev_g->adj + rev_g->num_edges[i], rev_g->adj + rev_g->num_edges[i + 1]);
        }
    }
    free(dodg_deg_lst);
    free(cur_write_off);
    log_info("[%s]: Total Conversion Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
}
// Convert an undirected edge list into the CSR (off/adj_lst) of its
// degree-ordered directed graph (DODG): each edge passing the filter f(i)
// is stored once, oriented from the RankLT-smaller endpoint (degree, then
// id) to the larger. deg_lst receives the filtered undirected degrees.
// Allocates deg_lst and off (and adj_lst when null) with malloc; the
// caller owns the returned buffers. In-memory variant of the DiskBuffer
// overload.
// Fixes the corrupted deg_lst parameter declarator ("*°_lst" was an
// HTML-entity mangling of "*&deg_lst").
template<typename T, typename F, typename OFF>
void ConvertEdgeListToDODGCSR(OFF num_edges, pair<T, T> *&edge_lst,
                              uint32_t num_vertices, uint32_t *&deg_lst, OFF *&off, int32_t *&adj_lst,
                              int max_omp_threads, F f) {
    Timer convert_timer;
    deg_lst = (uint32_t *) malloc(sizeof(uint32_t) * (num_vertices + 1));
    auto *dodg_deg_lst = (uint32_t *) malloc(sizeof(uint32_t) * (num_vertices + 1));
    off = (OFF *) malloc(sizeof(OFF) * (num_vertices + 1));
    auto cur_write_off = (OFF *) malloc(sizeof(OFF) * (num_vertices + 1));
    vector<OFF> histogram;
#pragma omp parallel num_threads(max_omp_threads)
    {
        MemSetOMP(deg_lst, 0, num_vertices + 1);
        MemSetOMP(off, 0, num_vertices + 1);
#pragma omp single
        log_info("[%s]: InitTime: %.9lf s", __FUNCTION__, convert_timer.elapsed());
        // Histogram: undirected degrees, then DODG out-degrees.
        EdgeListHistogram(num_vertices, num_edges, edge_lst, deg_lst, f);
        MemSetOMP(dodg_deg_lst, 0, num_vertices + 1);
#pragma omp for
        for (size_t i = 0u; i < num_edges; i++) {
            if (f(i)) {
                auto src = edge_lst[i].first;
                auto dst = edge_lst[i].second;
                if (RankLT(deg_lst[src], deg_lst[dst], src, dst))
                    __sync_fetch_and_add(&dodg_deg_lst[src], 1);
                else
                    __sync_fetch_and_add(&dodg_deg_lst[dst], 1);
            }
        }
#pragma omp single
        log_info("[%s]: Histogram Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
        // PrefixSum: build CSR offsets from the DODG out-degrees.
        InclusivePrefixSumOMP(histogram, off + 1, num_vertices, [&dodg_deg_lst](uint32_t it) {
            return dodg_deg_lst[it];
        });
#pragma omp single
        {
            log_debug("%zu", off[num_vertices]);
            assert(off[num_vertices] <= num_edges);
        }
        MemCpyOMP(cur_write_off, off, num_vertices + 1);
#pragma omp single
        {
            log_info("Mem Usage: %s KB", FormatWithCommas(getValue()).c_str());
            if (adj_lst == nullptr) {
                log_info("Allocate Inside (adj_lst)...");
                adj_lst = (int32_t *) malloc(sizeof(int32_t) * off[num_vertices]);
            }
            log_info("[%s]: PrefixSum Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
        }
        // Scatter: write each oriented edge at its atomically claimed slot.
#pragma omp for schedule(dynamic, 32*4096/8)
        for (size_t i = 0; i < num_edges; i++) {
            if (f(i)) {
                auto src = edge_lst[i].first;
                auto dst = edge_lst[i].second;
                if (!RankLT(deg_lst[src], deg_lst[dst], src, dst)) {
                    swap(src, dst);
                }
                auto old_offset = __sync_fetch_and_add(&(cur_write_off[src]), 1);
                adj_lst[old_offset] = dst;
            }
        }
    }
    free(dodg_deg_lst);
    free(cur_write_off);
    log_info("[%s]: Total Conversion Time: %.9lf s", __FUNCTION__, convert_timer.elapsed());
}
inline void ReorderDegDescendingDODG(graph_t &g, vector<int32_t> &new_vid_dict, vector<int32_t> &old_vid_dict,
int32_t *&new_adj, uint32_t *°_lst) {
Timer timer;
auto max_omp_threads = omp_get_max_threads();
auto max_deg = 0;
auto *old_vid_dict_buffer = (int32_t *) malloc(sizeof(int32_t) * g.n);
uint32_t *write_off = nullptr;
uint32_t *bucket_ptrs = nullptr;
auto histogram = vector<uint32_t>((max_omp_threads + 1) * CACHE_LINE_ENTRY, 0);
#pragma omp parallel num_threads(max_omp_threads)
{
#pragma omp for reduction(max: max_deg)
for (auto i = 0; i < g.n; i++) {
max_deg = max<int>(max_deg, deg_lst[i]);
}
#pragma omp single nowait
{
old_vid_dict = vector<int32_t>(g.n);
}
#pragma omp for
for (auto i = 0u; i < g.n; i++) {
old_vid_dict_buffer[i] = i;
}
auto ptr = &old_vid_dict[0];
BucketSortSmallBuckets(histogram, old_vid_dict_buffer, ptr, write_off, bucket_ptrs,
g.n, max_deg + 1, [deg_lst, old_vid_dict_buffer, max_deg](int i) {
auto u = old_vid_dict_buffer[i];
return max_deg - (deg_lst[u]);
});
}
free(write_off);
free(bucket_ptrs);
free(old_vid_dict_buffer);
log_info("Deg-descending time: %.9lf s", timer.elapsed());
Reorder(g, new_vid_dict, old_vid_dict, new_adj);
}
|
DRB037-truedepseconddimension-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized in this program.
The inner loop has true dependence.
Data race pair: b[i][j]@63:7 vs. b[i][j-1]@63:15
*/
#include <stdlib.h>
#include <stdio.h>
double b[1000][1000];
int main(int argc, char* argv[])
{
int i,j;
int n=1000, m=1000;
// Initialisation: every b[i][j] is written independently of the others,
// so parallelising these loops is race-free.
#pragma omp parallel for private(i, j)
for (i=0;i<n;i++)
#pragma omp parallel for private(j)
for (j=1;j<m;j++)
b[i][j]= i * m + j;
// DataRaceBench kernel: the j-loop carries a true dependence
// (b[i][j] reads b[i][j-1]); the file header documents the intentional
// race pair b[i][j] vs b[i][j-1]. This defect is the benchmark's payload
// and must NOT be "fixed".
// NOTE(review): as written only the outer i-loop is parallelised, which
// by itself respects the j-dependence -- confirm against the upstream
// DRB037 source which loop the benchmark intends to parallelise.
#pragma omp parallel for private(i, j)
for (i=0;i<n;i++)
for (j=1;j<m;j++)
b[i][j]=b[i][j-1];
// Serial output of the result for checking.
for (i=0;i<n;i++)
for (j=1;j<m;j++)
printf("%lf\n",b[i][j]);
return 0;
}
|
ops.h | #pragma once
#ifndef OPS_H_
#define OPS_H_
#include <op_boilerplate.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <vector>
#include <Environment.h>
#include <loops/summarystatsreduce.h>
#define MIN 1e-12
#define MAX_FLOAT 1e37
#define MIN_FLOAT 1e-37
#define MAX_INT 2147483647
#define MIN_CUTFOFF -3.79297773665f
#define FLOAT_MIN_NORMAL 1.17549435e-38
#define EPS 1e-5
#define AFFINITY close
#define DOUBLE_PI_T T(2.0 * 3.14159265358979323846)
#define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(T *dx, Nd4jLong *xShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(T *x, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){}
#ifdef __CUDACC__
#include <helpers/sharedmem.h>
#define no_op_exec_special_cuda static __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeBuffer,T *result, Nd4jLong *resultShapeBuffer,T *extraParams, int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {}
#define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {}
#else
// hacky fix for isnan/being being out of scope
//#ifdef IOS
//#define isinf(x) 0 // this isn't right. But std::isinf fails
//#define isnan(x) 0
//#else
//#define isnan std::isnan
//#define isinf std::isinf
//#endif
#define no_op_exec_special_cuda
#define no_op_exec_special_accumulation_cuda
#endif
#define SELU_ALPHA 1.6732632423543772848170429916717
#define SELU_LAMBDA 1.0507009873554804934193349852946
#ifdef _OPENMP
#pragma omp declare reduction(maxT : float,double,float16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minT : float,double,float16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(sumT : float,double,float16 : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0.0f)
#endif
namespace functions {
namespace indexreduce {
template<typename T>
struct IndexValue {
T value;
Nd4jLong index;
};
}
namespace summarystats {
template <typename T>
class SummaryStatsData;
}
}
namespace simdOps {
// Pairwise addition op for the op-dispatch machinery.
// Two- and three-argument forms return d1 + d2 (params unused); the
// single-argument form returns d1 unchanged; the MetaOps form adds
// params[0]; startingValue() is the additive identity 0.
template<typename T>
class Add {
public:
op_def static T op(T d1, T d2) {
return d1 + d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 + d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return d1 + params[0];
}
op_def static T startingValue() {
return static_cast<T>(0.f);
}
};
// Pairwise subtraction op: d1 - d2 (params unused in the 3-arg form);
// single-argument form returns d1 unchanged; MetaOps form subtracts
// params[0].
template<typename T>
class Subtract {
public:
op_def static T op(T d1, T d2) {
return d1 - d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 - d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return d1 - params[0];
}
};
// Squared-difference op: (d1 - d2)^2 computed via nd4j_pow; the
// single-argument form returns d1 unchanged; MetaOps form squares
// (d1 - params[0]).
template<typename T>
class SquaredSubtract {
public:
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_pow<T>(d1 - d2, static_cast<T>(2.f));
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_pow<T>(d1 - d2, static_cast<T>(2.f));
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_pow<T>(d1 - params[0], static_cast<T>(2.f));
}
};
template<typename T>
class ReverseSubtract {
public:
op_def static T op(T d1, T d2) {
return d2 - d1;
}
op_def static T op(T d1, T d2, T *params) {
return d2 - d1;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return params[0] - d1;
}
};
// Log-Poisson loss including the Stirling normalization term:
// exp(c) - z*c + (z*log(z) - z + 0.5*log(DOUBLE_PI_T * z)).
// NOTE(review): from the formula, z appears to be the target and c the
// log-prediction, and DOUBLE_PI_T presumably expands to 2*pi — confirm
// against the op's callers.
template<typename T>
class LogPoisonLossFull {
public:
op_def static T op(T z, T c) {
return (nd4j::math::nd4j_exp<T>(c) - z * c + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z)));
}
op_def static T op(T z, T c, T *params) {
return (nd4j::math::nd4j_exp<T>(c) - z * c + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z)));
}
op_def static T op(T z) {
return (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z));
}
// op for MetaOps
op_def static T op(T z, T *params) {
return (nd4j::math::nd4j_exp<T>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<T>(z) - z + static_cast<T>(0.5f) * nd4j::math::nd4j_log<T>(DOUBLE_PI_T * z)));
}
};
// Log-Poisson loss without the Stirling term: exp(c) - z*c.
template<typename T>
class LogPoisonLoss {
public:
op_def static T op(T z, T c) {
return (nd4j::math::nd4j_exp<T>(c) - z * c);
}
op_def static T op(T z, T c, T *params) {
return (nd4j::math::nd4j_exp<T>(c) - z * c);
}
op_def static T op(T z) {
return (z);
}
// op for MetaOps
op_def static T op(T z, T *params) {
return (nd4j::math::nd4j_exp<T>(params[0]) - z * params[0]);
}
};
// Pairwise multiplication; startingValue() is the multiplicative identity.
template<typename T>
class Multiply {
public:
op_def static T op(T d1, T d2) {
return d1 * d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 * d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return d1 * params[0];
}
op_def static T startingValue() {
return static_cast<T>(1.f);
}
};
// Pairwise division d1 / d2; no zero-divisor guard (see SafeDivide).
template<typename T>
class Divide {
public:
op_def static T op(T d1, T d2) {
return d1 / d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 / d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return d1 / params[0];
}
op_def static T startingValue() {
return static_cast<T>(1.f);
}
};
// Division that yields 0 instead of inf/NaN when the divisor is exactly 0.
template<typename T>
class SafeDivide {
public:
op_def static T op(T d1, T d2) {
if(d2 == static_cast<T>(0.f))
return static_cast<T>(0.f);
return d1 / d2;
}
op_def static T op(T d1, T d2, T *params) {
if(d2 == static_cast<T>(0.f))
return static_cast<T>(0.f);
return d1 / d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
if(params[0] == static_cast<T>(0.f))
return static_cast<T>(0.f);
return d1 / params[0];
}
};
// Floored division: floor(d1 / d2). The unary form floors d1 itself.
template<typename T>
class FloorDiv {
public:
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_floor<T>(d1 / d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_floor<T>(d1 / d2);
}
op_def static T op(T d1) {
return nd4j::math::nd4j_floor<T>(d1);
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_floor<T>(d1 / params[0]);
}
};
// Truncated (toward-zero) division, done in int arithmetic.
// NOTE(review): the int casts truncate, so any divisor in (-1, 1) becomes 0
// and the division is undefined behavior — confirm callers exclude that range.
template<typename T>
class TruncateDiv {
public:
op_def static T op(T d1, T d2) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<T>(i1 / i2);
}
op_def static T op(T d1, T d2, T *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(d2);
return static_cast<T>(i1 / i2);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
auto i1 = static_cast<int>(d1);
auto i2 = static_cast<int>(params[0]);
return static_cast<T>(i1 / i2);
}
};
// IEEE-style remainder of d1 / d2, delegated to nd4j_remainder.
template<typename T>
class Remainder {
public:
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_remainder(d1, d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_remainder(d1, d2);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_remainder(d1, params[0]);
}
};
// C-style fmod (result keeps the sign of d1), delegated to nd4j_fmod.
template<typename T>
class FMod {
public:
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_fmod(d1, d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_fmod(d1, d2);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_fmod(d1, params[0]);
}
};
// Floored modulo (Python-style %): the result takes the sign of the divisor.
// Built on fmod: when d1 and d2 have the same sign, fmod already gives the
// floored result; otherwise shift the fmod result by one divisor period.
template<typename T>
class FloorMod {
public:
op_def static T op(T d1, T d2) {
// (was `...fmod(d1, d2);;` — stray empty statement removed)
T m = nd4j::math::nd4j_fmod(d1, d2);
return (d1 < static_cast<T>(0.0f)) == (d2 < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + d2, d2);
}
op_def static T op(T d1, T d2, T *params) {
T m = nd4j::math::nd4j_fmod(d1, d2);
return (d1 < static_cast<T>(0.0f)) == (d2 < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + d2, d2);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps: divisor comes from params[0].
op_def static T op(T d1, T *params) {
T m = nd4j::math::nd4j_fmod(d1, params[0]);
return (d1 < static_cast<T>(0.0f)) == (params[0] < static_cast<T>(0.0f)) ? m : nd4j::math::nd4j_fmod<T>(m + params[0], params[0]);
}
};
// Division with operands swapped: d2 / d1 (scalar form: params[0] / d1).
template<typename T>
class ReverseDivide {
public:
op_def static T op(T d1, T d2) {
return d2 / d1;
}
op_def static T op(T d1, T d2, T *params) {
return d2 / d1;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return params[0] / d1;
}
};
// Copy: ignores d1 and returns the second operand (or params[0]).
template<typename T>
class Copy {
public:
op_def static T op(T d1, T d2) {
return d2;
}
op_def static T op(T d1, T d2, T *params) {
return d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return params[0];
}
};
// Exact duplicate of Copy, kept under a second op name/number.
template<typename T>
class Copy2 {
public:
op_def static T op(T d1, T d2) {
return d2;
}
op_def static T op(T d1, T d2, T *params) {
return d2;
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return params[0];
}
};
// BLAS-style axpy: alpha * d1 + d2 with alpha = params[0]; the params-less
// pairwise form degenerates to plain addition.
template<typename T>
class Axpy {
public:
op_def static T op(T d1, T d2) {
return d2 + d1;
}
op_def static T op(T d1, T d2, T *params) {
T alpha = params[0];
return alpha * d1 + d2;
}
op_def static T op(T d1) {
return d1;
}
};
// Logical AND over numeric values: comp = params[0] is the value treated as
// "false"; returns 1.0 when both operands differ from comp.
// NOTE(review): the MetaOps form returns the constant 119 — presumably a
// debug sentinel marking a path that should never run; confirm before use.
template<typename T>
class And {
public:
op_def static T op(T d1, T d2) {
return d2 + d1;
}
op_def static T op(T d1, T d2, T *params) {
T comp = params[0];
return d1 != comp && d2 != comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return static_cast<T>(119.0f);
}
};
// Logical OR: 1.0 when at least one operand differs from comp = params[0].
// Same 119 sentinel in the MetaOps form as And.
template<typename T>
class Or {
public:
op_def static T op(T d1, T d2) {
return d2 + d1;
}
op_def static T op(T d1, T d2, T *params) {
T comp = params[0];
return d1 != comp || d2 != comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return d1;
}
// op for MetaOps
op_def static T op(T d1, T *params) {
return static_cast<T>(119.0f);
}
};
// Logical XOR: 1.0 when exactly one operand equals comp = params[0].
template<typename T>
class Xor {
public:
op_def static T op(T d1, T d2) {
return d2 + d1;
}
op_def static T op(T d1, T d2, T *params) {
T comp = params[0];
return ((d1 == comp && d2 != comp)||(d1 != comp && d2 == comp)) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return d1;
}
};
// Logical NOT: 1.0 when d1 equals comp = params[0], else 0.0.
template<typename T>
class Not {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T comp = params[0];
return d1 == comp ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
};
// Returns d1 when d2 < d1, otherwise d2 — i.e. raises d2 to at least d1.
template<typename T>
class SetValOrLess {
public:
op_def static T op(T d1, T d2, T *params) {
if (d2 < d1) {
return d1;
}
return d2;
}
};
// C-style integer modulo: both operands truncated to int before %.
template<typename T>
class Mod {
public:
/*
// just a optional note, feel free to remove later
op_def static half op(half d1, half d2, half *params) {
return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr));
}
*/
op_def static T op(T d1, T d2) {
return (int)d1 % (int)d2;
}
op_def static T op(T d1, T d2, T *params) {
return (int)d1 % (int)d2;
}
// op for MetaOp
op_def static T op(T d1, T *params) {
return (int)d1 % (int)params[0];
}
};
// Integer modulo with operands swapped: d2 % d1 (scalar: params[0] % d1).
template<typename T>
class ReverseMod {
public:
op_def static T op(T d1, T d2) {
return (int)d2 % (int)d1;
}
op_def static T op(T d1, T d2, T *params) {
return (int)d2 % (int)d1;
}
// op for MetaOp
op_def static T op(T d1, T *params) {
return (int)params[0] % (int)d1;
}
};
/**
* Whether 2 elements in an array
* are epsilon equal: 1.0 when |d1 - d2| <= MIN, else 0.0.
*/
template<typename T>
class Epsilon {
public:
op_def static T op(T d1, T d2, T *params) {
T diff = d1 - d2;
T absDiff = nd4j::math::nd4j_abs<T>(diff);
if (absDiff <= static_cast<T>(MIN))
return static_cast<T>(1.0f);
return static_cast<T>(0.0f);
}
op_def static T op(T d1, T *params) {
return d1;
}
};
// Elementwise comparison functors: each returns the bool result of the
// comparison converted to T (1 or 0); the (d1, params) form is a
// pass-through kept for the MetaOps signature.
template<typename T>
class EqualTo {
public:
op_def static T op(T d1, T d2) {
return d1 == d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 == d2;
}
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class NotEqualTo {
public:
op_def static T op(T d1, T d2) {
return d1 != d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 != d2;
}
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class GreaterThanOrEqual {
public:
op_def static T op(T d1, T d2) {
return d1 >= d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 >= d2;
}
// FIXME: this signature clashes with MetaOp stuff
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class GreaterThan {
public:
op_def static T op(T d1, T d2) {
return d1 > d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 > d2;
}
// FIXME: this signature clashes with MetaOp stuff
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class LessThan {
public:
op_def static T op(T d1, T d2) {
return d1 < d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 < d2;
}
op_def static T op(T d1, T *params) {
return d1;
}
};
template<typename T>
class LessThanOrEqual {
public:
op_def static T op(T d1, T d2) {
return d1 <= d2;
}
op_def static T op(T d1, T d2, T *params) {
return d1 <= d2;
}
op_def static T op(T d1, T *params) {
return d1;
}
};
// |d1|
template<typename T>
class Abs {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_abs<T>(d1);
}
};
// ceil(d1)
template<typename T>
class Ceiling {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_ceil<T>(d1);
}
};
// cos(d1)
template<typename T>
class Cosine {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_cos<T>(d1);
}
};
// e^d1
template<typename T>
class Exp {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_exp<T>(d1);
}
};
// Derivative of HardTanh: 1 inside [-1, 1], 0 outside.
template<typename T>
class HardTanhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return ((d1 >= static_cast<T>(-1.0f) && d1 <= static_cast<T>(1.0f)) ? static_cast<T>(1.0f) : static_cast<T>(0.0f));
}
};
// HardTanh: clamp d1 to [-1, 1].
template<typename T>
class HardTanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
if (d1 < static_cast<T>(-1.0f))
return static_cast<T>(-1.0f);
else if (d1 > static_cast<T>(1.0f))
return static_cast<T>(1.0f);
else
return d1;
}
};
// floor(d1)
template<typename T>
class Floor {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_floor<T>(d1);
}
};
// Natural logarithm ln(d1).
template<typename T>
class Log {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_log<T>(d1);
}
};
// ln(1 + d1). NOTE(review): computed as log(1+d1) directly rather than a
// dedicated log1p, so precision degrades for |d1| near 0 — confirm acceptable.
template<typename T>
class Log1p {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_log<T>(1+d1);
}
};
// Logarithm of d1 in base params[0], via the change-of-base identity.
template<typename T>
class LogX {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_log<T>(d1) / nd4j::math::nd4j_log<T>(params[0]) ;
}
};
// Replaces non-positive values with a small constant (0.001); used to keep
// fp16 values out of the zero/negative range.
template<typename T>
class StabilizeFP16 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
if (d1 <= static_cast<T>(0.f)) return static_cast<T>(0.001f);
else return d1;
}
};
// d1 * (1 - d1): the sigmoid-derivative form applied to an already-activated
// value.
template<typename T>
class SpecialDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * (static_cast<T>(1.0f) - d1);
}
};
// Unary negation.
template<typename T>
class Neg {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return -d1;
}
};
// Gauss error function erf(d1).
template<typename T>
class Erf {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_erf<T>(d1);
}
};
// Complementary error function erfc(d1).
template<typename T>
class Erfc {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_erfc<T>(d1);
}
};
// 1 / d1.
template<typename T>
class Reciprocal {
public:
no_op_exec_special
no_op_exec_special_cuda
// op_def static T op(T d1) {
// return (T(1.0f) / d1);
// }
// op for MetaOps
op_def static T op(T d1, T *params) {
return (static_cast<T>(1.0f)/d1);
}
};
// d1^2 via nd4j_pow (see also Square, which uses plain d1 * d1).
template<typename T>
class Sqr {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.f));
}
op_def static T op(T d1) {
return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f));
}
};
// Relative error between the two operands, delegated to nd4j_re.
template<typename T>
class RelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_re<T>(d1, params[0]);
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_re<T>(d1, d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_re<T>(d1, d2);
}
op_def static T op(T d1) {
return static_cast<T>(0.0f);
}
};
// 1.0 when the relative error between d1 and d2 exceeds a threshold.
// Scalar form: params[0] = comparand, params[1] = threshold.
// Pairwise form: params[0] = threshold.
template<typename T>
class BinaryRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T d2 = params[0];
T threshold = params[1];
return nd4j::math::nd4j_re<T>(d1, d2) > threshold ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1, T d2, T *params) {
T threshold = params[0];
return nd4j::math::nd4j_re<T>(d1, d2) > threshold ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return static_cast<T>(0.0f);
}
};
// 1.0 only when BOTH the relative error exceeds thresholdRelative AND the
// absolute difference is at least thresholdAbsolute; 0.0 otherwise.
template<typename T>
class BinaryMinimumAbsoluteRelativeError {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T d2 = params[0];
T thresholdRelative = params[1];
T thresholdAbsolute = params[2];
return nd4j::math::nd4j_re<T>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<T>(d1 - d2) < thresholdAbsolute ? static_cast<T>(0.0f) : static_cast<T>(1.0f)) : static_cast<T>(0.0f);
}
op_def static T op(T d1, T d2, T *params) {
T thresholdRelative = params[0];
T thresholdAbsolute = params[1];
return nd4j::math::nd4j_re<T>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<T>(d1 - d2) < thresholdAbsolute ? static_cast<T>(0.0f) : static_cast<T>(1.0f)) : static_cast<T>(0.0f);
}
op_def static T op(T d1) {
return static_cast<T>(0.0f);
}
};
// d1 raised to the power d2 (scalar form: exponent = params[0]).
template<typename T>
class Pow {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_pow<T>(d1, params[0]);
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_pow<T>(d1, d2);
}
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_pow<T>(d1, d2);
}
op_def static T op(T d1) {
return d1;
}
};
// d/dx of x^p: p * x^(p-1), with p = d2 or params[0].
template<typename T>
class PowDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return params[0] * nd4j::math::nd4j_pow<T>(d1, params[0] - static_cast<T>(1.f));
}
op_def static T op(T d1, T d2) {
return d2 * nd4j::math::nd4j_pow<T>(d1, d2 - static_cast<T>(1.f));
}
op_def static T op(T d1, T d2, T *params) {
return d2 * nd4j::math::nd4j_pow<T>(d1, d2 - static_cast<T>(1.f));
}
op_def static T op(T d1) {
return d1;
}
};
// Round to nearest, delegated to nd4j_round.
template<typename T>
class Round {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_round<T>(d1);
}
};
// 1.0 when d1 is NaN, else 0.0.
template<typename T>
class IsNan {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_isnan(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
};
// e^d1 - 1. NOTE(review): computed as exp(d1)-1 directly, not a dedicated
// expm1, so precision degrades for |d1| near 0 — confirm acceptable.
template<typename T>
class Expm1 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_exp(d1) - static_cast<T>(1.0f);
}
};
// 1.0 when d1 is +/- infinity, else 0.0.
template<typename T>
class IsInf {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_isinf<T>(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
};
// 1.0 when d1 is finite (not NaN, not inf), else 0.0.
template<typename T>
class IsFinite {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_isfin<T>(d1) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
};
// Clamp d1 to [params[0], params[1]] (min first, max second).
template<typename T>
class ClipByValue {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
if (d1 > params[1])
return params[1];
else if (d1 < params[0])
return params[0];
else return d1;
}
};
// Swish activation: x * sigmoid(x).
template<typename T>
class Swish {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * nd4j::math::nd4j_sigmoid<T>(d1);
}
};
// Derivative of swish: (e^x * (x + e^x + 1)) / (e^x + 1)^2, with
// e^x computed as pow(M_E, x).
template<typename T>
class SwishDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T ex = nd4j::math::nd4j_pow<T>(static_cast<T>(M_E), d1);
return (ex * (d1 + ex + static_cast<T>(1.f))) / nd4j::math::nd4j_pow<T>((ex + static_cast<T>(1.f)) , static_cast<T>(2.0f));
}
};
// log(sigmoid(x)).
template<typename T>
class LogSigmoid {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_log(nd4j::math::nd4j_sigmoid<T>(d1));
}
};
// Derivative of log(sigmoid(x)): 1 - sigmoid(x) = 1 / (e^x + 1).
template<typename T>
class LogSigmoidDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
// Cast M_E (a double constant) to T before the templated pow call,
// matching SwishDerivative above; avoids an implicit double -> T
// narrowing at the call site (relevant for T = float16).
T ex = nd4j::math::nd4j_pow<T>(static_cast<T>(M_E), d1);
return static_cast<T>(1.f) / (ex + static_cast<T>(1.f));
}
};
// Logistic sigmoid 1 / (1 + e^-x), delegated to nd4j_sigmoid.
template<typename T>
class Sigmoid {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sigmoid<T>(d1);
}
};
// Sigmoid derivative, delegated to nd4j_sigmoidderivative.
template<typename T>
class SigmoidDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sigmoidderivative<T>(d1);
}
};
// Piecewise-linear sigmoid approximation: clamp(0.2*x + 0.5, 0, 1).
template<typename T>
class HardSigmoid {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_min<T>(static_cast<T>(1.0f), nd4j::math::nd4j_max<T>(static_cast<T>(0.0f), (static_cast<T>(0.2f)) * d1 + static_cast<T>(0.5f)));
}
};
// HardSigmoid derivative: 0.2 on the linear segment (|x| <= 2.5), else 0.
template<typename T>
class HardSigmoidDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 < static_cast<T>(-2.5f) || d1 > static_cast<T>(2.5f) ? static_cast<T>(0.0f) : static_cast<T>(0.2f);
}
};
/**
* Scale to be between a min and max
* (params[0] = min, params[1] = max). Values already inside the range pass
* through; the [0, 1] target range additionally squashes through a sigmoid.
* NOTE(review): the floor-based rescale formula is unusual — confirm the
* intended semantics against the op's Java-side counterpart.
*/
template<typename T>
class SetRange {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T min = params[0];
T max = params[1];
if (d1 >= min && d1 <= max)
return d1;
if (min == static_cast<T>(0.0f) && max == static_cast<T>(1.0f)) {
auto val = static_cast<T>(1.0f) / (static_cast<T>(1.0f) + nd4j::math::nd4j_exp<T>(-d1));
return (nd4j::math::nd4j_floor<T>(val * (max - min)) + min);
}
auto ret = (nd4j::math::nd4j_floor<T>(d1 * (max - min)) + min);
return ret;
}
};
// sin(d1)
template<typename T>
class Sin {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sin<T>(d1);
}
};
// d1 * d1 (plain multiplication; see Sqr for the pow-based variant).
template<typename T>
class Square {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * d1;
}
};
// sqrt(d1)
template<typename T>
class Sqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sqrt<T>(d1);
}
};
// Reciprocal square root: 1 / sqrt(d1).
template<typename T>
class RSqrt {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.0f) / nd4j::math::nd4j_sqrt<T>(d1);
}
};
// Round to nearest integer value, delegated to nd4j_rint.
template<typename T>
class Rint {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_rint<T>(d1);
}
};
// Softplus: delegated to nd4j::math::softplus (log(1 + e^x) form).
template<typename T>
class SoftPlus {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::softplus<T>(d1);
}
};
// Sign function via the (d1 > 0) - (d1 < 0) idiom: -1, 0 or +1.
template<typename T>
class Sign {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return (d1 > static_cast<T>(0.0f)) - (d1 < static_cast<T>(0.0f));
}
};
// x * (1 - x).
template<typename T>
class TimesOneMinus {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * (static_cast<T>(1.0f) - d1);
}
};
// Rational approximation of 1.7159 * tanh(2x/3) (LeCun-style scaled tanh).
template<typename T>
class RationalTanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
// keep 2/3 as runtime variable, to match precision
auto dis = (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * d1;
auto tanh = nd4j::math::nd4j_sgn<T>(dis) * (static_cast<T>(1.0f) - (static_cast<T>(1.0f) / (static_cast<T>(1.0f) + nd4j::math::nd4j_abs<T>(dis) + nd4j::math::nd4j_pow<T>(dis, static_cast<T>(2.0f)) + static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(4.0f)) )));
return static_cast<T>(1.7159f) * tanh;
}
};
// Derivative of RationalTanh, using the quotient rule on the same rational
// approximation.
template<typename T>
class RationalTanhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
auto dis = (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * d1;
auto a = static_cast<T>(1.0f) + nd4j::math::nd4j_abs<T>(dis) + nd4j::math::nd4j_pow<T>(dis, static_cast<T>(2.)) + static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(4.f));
auto tDeriv = (static_cast<T>(1.0f) + nd4j::math::nd4j_sign<T>(dis) * (static_cast<T>(2.0f) * dis + static_cast<T>(4.0f) * static_cast<T>(1.41645f) * nd4j::math::nd4j_pow<T>(dis, static_cast<T>(3.f)))) / (a * a);
return static_cast<T>(1.7159f) * (static_cast<T>(2.0f) / static_cast<T>(3.0f)) * tDeriv;
}
};
// tanh(d1)
template<typename T>
class Tanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_tanh<T>(d1);
}
};
// max(0, tanh(d1)).
template<typename T>
class RectifiedTanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_max<T>(static_cast<T>(0.0f), nd4j::math::nd4j_tanh<T>(d1));
}
};
// Derivative of RectifiedTanh: tanh'(x) for x > 0, else 0.
template<typename T>
class RectifiedTanhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 > static_cast<T>(0.0f) ? nd4j::math::nd4j_tanhderivative<T>(d1) : static_cast<T>(0.0f);
}
};
// Inverse hyperbolic tangent.
template<typename T>
class ATanh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_atanh<T>(d1);
}
};
// tanh'(d1), delegated to nd4j_tanhderivative.
template<typename T>
class TanhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_tanhderivative<T>(d1);
}
};
// d1^3.
template<typename T>
class Cube {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 * d1 * d1;
}
};
// d/dx x^3 = 3x^2.
template<typename T>
class CubeDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return 3 * d1 * d1;
}
};
// acos(d1)
template<typename T>
class ACos {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_acos<T>(d1);
}
};
// asinh(d1)
template<typename T>
class ASinh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_asinh<T>(d1);
}
};
// d/dx asinh(x) = 1 / sqrt(x^2 + 1).
template<typename T>
class ASinhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.f) / (nd4j::math::nd4j_sqrt(nd4j::math::nd4j_pow(d1, static_cast<T>(2.f)) + static_cast<T>(1.f)));
}
};
// acosh(d1)
template<typename T>
class ACosh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_acosh<T>(d1);
}
};
// d/dx acosh(x) = 1 / (sqrt(x - 1) * sqrt(x + 1)).
template<typename T>
class ACoshDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.f) / (nd4j::math::nd4j_sqrt(d1 - static_cast<T>(1.f)) * nd4j::math::nd4j_sqrt(d1 + static_cast<T>(1.f)));
}
};
// Constant 1.0 regardless of input.
template<typename T>
class Ones {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.0f);
}
};
// Softsign x / (1 + |x|), delegated to nd4j_softsign.
template<typename T>
class SoftSign {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_softsign<T>(d1);
}
};
// Softsign derivative, delegated to nd4j_softsignderivative.
template<typename T>
class SoftSignDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_softsignderivative<T>(d1);
}
};
// Predicate-counting reduction: op() maps each element to 1.0 when it
// satisfies the condition encoded in extraParams (0.0 otherwise), and the
// merge/update combiners simply sum the hits.
//   extraParams[0] = comparison value
//   extraParams[1] = epsilon used by the approximate (not-)equals modes
//   extraParams[2] = condition mode (see the switch below)
template<typename T>
class MatchCondition {
public:
no_op_exec_special
no_op_exec_special_cuda
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
// Returns 1.0 if the condition holds for d1, 0.0 otherwise. An unknown
// mode is reported to stdout and d1 is returned unchanged.
op_def static T op(T d1, T *extraParams) {
T compare = extraParams[0];
T eps = extraParams[1];
auto mode = static_cast<int>(extraParams[2]);
switch (mode) {
case 0: // equals (within eps)
return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? 1.0 : 0.0;
case 1: // not equals (outside eps)
return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? 1.0 : 0.0;
case 2: // less_than
return d1 < compare ? 1.0 : 0.0;
case 3: // greater_than
return d1 > compare ? 1.0 : 0.0;
case 4: // less_or_equals_than
return d1 <= compare ? 1.0 : 0.0;
case 5: // greater_or_equals_than
return d1 >= compare ? 1.0 : 0.0;
case 6: // abs_less_than
return nd4j::math::nd4j_abs<T>(d1) < compare ? 1.0 : 0.0;
case 7: // abs_greater_than
return nd4j::math::nd4j_abs<T>(d1) > compare ? 1.0 : 0.0;
case 8: // is inf
return nd4j::math::nd4j_isinf(d1) ? 1.0 : 0.0;
case 9: // is nan
return nd4j::math::nd4j_isnan(d1) ? 1.0 : 0.0;
case 10: // exact equals
return (d1 == compare) ? 1.0 : 0.0;
case 11: // exact not equals
return (d1 != compare) ? 1.0 : 0.0;
case 12: // abs_greater_or_equals_than
return nd4j::math::nd4j_abs<T>(d1) >= compare ? 1.0 : 0.0;
case 13: // abs_less_or_equals_than
return nd4j::math::nd4j_abs<T>(d1) <= compare ? 1.0 : 0.0;
default:
printf("Undefined match condition: [%i]\n", mode);
}
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Exponential linear unit, delegated to nd4j_elu.
template<typename T>
class ELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_elu<T>(d1);
}
};
// ELU derivative, delegated to nd4j_eluderivative.
template<typename T>
class ELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_eluderivative<T>(d1);
}
};
// ReLU with configurable cutoff: max(params[0], d1). params[0] is usually 0
// but is supplied by the caller, not hard-coded here.
template<typename T>
class RELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 < params[0] ? params[0] : d1;
}
};
// ReLU capped at 6: min(max(params[0], d1), 6).
template<typename T>
class RELU6 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T relu = d1 < params[0] ? params[0] : d1;
return relu < static_cast<T>(6.f) ? relu : static_cast<T>(6.f);
}
};
// Leaky ReLU with slope params[0] on the negative side.
template<typename T>
class LeakyRELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_leakyrelu<T>(d1, params[0]);
}
};
// SELU: lambda * x for x > 0, lambda * alpha * (e^x - 1) otherwise
// (SELU_ALPHA / SELU_LAMBDA defined at the top of this file).
template<typename T>
class SELU {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 > static_cast<T>(0.0f) ? static_cast<T>(SELU_LAMBDA) * d1 : static_cast<T>(SELU_LAMBDA) * (static_cast<T>(SELU_ALPHA) * nd4j::math::nd4j_exp<T>(d1) - static_cast<T>(SELU_ALPHA));
}
};
// SELU derivative: lambda for x > 0, lambda * alpha * e^x otherwise.
template<typename T>
class SELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1 > static_cast<T>(0.0f) ? static_cast<T>(SELU_LAMBDA) : static_cast<T>(SELU_ALPHA) * static_cast<T>(SELU_LAMBDA) * nd4j::math::nd4j_exp<T>(d1);
}
};
// Leaky ReLU derivative: 1 for x >= 0, else the slope params[0].
template<typename T>
class LeakyRELUDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
if (d1 >= static_cast<T>(0.0f))
return static_cast<T>(1.0f);
else
return params[0];
}
};
// asin(d1)
template<typename T>
class ASin {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_asin<T>(d1);
}
};
// sinh(d1)
template<typename T>
class Sinh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_sinh<T>(d1);
}
};
// d/dx sinh(x) = cosh(x).
template<typename T>
class SinhDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_cosh<T>(d1);
}
};
// cosh(d1)
template<typename T>
class Cosh {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_cosh<T>(d1);
}
};
// tan(d1)
template<typename T>
class Tan {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_tan<T>(d1);
}
};
// d/dx tan(x) = 1 / cos^2(x).
template<typename T>
class TanDerivative {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.0f) / nd4j::math::nd4j_pow<T>(nd4j::math::nd4j_cos<T>(d1), static_cast<T>(2.0f));
}
};
// atan(d1)
template<typename T>
class ATan {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return nd4j::math::nd4j_atan(d1);
}
};
// Two-argument arctangent. NOTE(review): the operands are deliberately
// swapped — op(d1, d2) computes atan2(d2, d1), i.e. d2 plays the "y" role;
// confirm this matches the broadcast order expected by callers.
template<typename T>
class Atan2 {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_atan2<T>(d2, d1);
}
};
// Pass-through.
template<typename T>
class Identity {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return d1;
}
};
// Clamps d1 * k (k = params[0]) into [MIN_CUTFOFF, -MIN_CUTFOFF] and
// rescales by 1/k. NOTE(review): assumes the MIN_CUTFOFF macro (note the
// spelling) is negative so that -MIN_CUTFOFF is the positive bound — confirm
// against its definition.
template<typename T>
class Stabilize {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
T k = params[0];
if (d1 * k > static_cast<T>(- MIN_CUTFOFF))
return static_cast<T>(- MIN_CUTFOFF) / k;
else if (d1 * k < static_cast<T>(MIN_CUTFOFF))
return static_cast<T>(MIN_CUTFOFF) / k;
return d1;
}
};
// Heaviside step with threshold params[0]: 1.0 when d1 > params[0], else 0.0.
template<typename T>
class Step {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return (d1 > params[0] ? static_cast<T>(1.0f) : static_cast<T>(0.0f));
}
};
// 1 - d1.
template<typename T>
class OneMinus {
public:
no_op_exec_special
no_op_exec_special_cuda
op_def static T op(T d1, T *params) {
return static_cast<T>(1.0f) - d1;
}
};
template<typename T>
class Sum {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce computing -sum(d^2 * log(d^2)) over the buffer.
template<typename T>
class ShannonEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
// Per-element term: d^2 * log(d^2).
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)) * nd4j::math::nd4j_log<T>(nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)));
}
// Negate the accumulated sum, per the entropy convention.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return -reduction;
}
};
// Accumulation reduce computing sum(log(d^2)) over the buffer.
// NOTE(review): despite the name, postProcess applies no log (and no
// negation) to the accumulated sum — confirm the intended semantics
// against the framework's reference implementation.
template<typename T>
class LogEntropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
// Per-element term: log(d^2).
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_log<T>(nd4j::math::nd4j_pow<T>(d1, static_cast<T>(2.0f)));
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce computing sum(d * log(d)) over the buffer.
// NOTE(review): unlike ShannonEntropy above, postProcess does NOT negate
// the sum, so this is the negative of the usual -sum(p log p) entropy —
// verify this sign convention is what callers expect.
template<typename T>
class Entropy {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
// Per-element term: d * log(d). Undefined for d <= 0.
op_def static T op(T d1, T *extraParams) {
return d1 * nd4j::math::nd4j_log<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce: sum of absolute values (L1-style accumulation).
template<typename T>
class ASum {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
// Both combine steps re-apply abs defensively; since per-element op()
// already produces non-negative values this is idempotent.
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
}
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_abs<T>(reduction);
}
};
// Accumulation reduce: counts elements not exactly equal to zero.
template<typename T>
class CountNonZero {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
// Map each element to 1 if nonzero, 0 if zero (exact comparison).
op_def static T op(T d1, T *extraParams) {
return d1 == static_cast<T>(0.0f) ? static_cast<T>(0.0f) : static_cast<T>(1.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce: counts elements exactly equal to zero.
template<typename T>
class CountZero {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
// Map each element to 1 if zero, 0 otherwise (exact comparison).
op_def static T op(T d1, T *extraParams) {
return d1 == static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce: product of all elements.
template<typename T>
class Prod {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
// Multiplicative identity as the seed.
op_def static T startingValue(const T *input) {
return static_cast<T>(1.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput * old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput * old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce: logical "any" — 1 if the sum of elements is > 0.
// NOTE(review): this assumes inputs are non-negative (typically 0/1
// truth values); mixed-sign inputs could cancel — confirm callers.
template<typename T>
class Any {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
// Collapse the accumulated sum to a 0/1 flag.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction > static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f) ;
}
};
// Accumulation reduce: logical "all" — multiplies elements together and
// reports 1 if the product is > 0.
// NOTE(review): assumes inputs are 0/1 truth values; an even number of
// negative inputs would also yield a positive product — confirm callers.
template<typename T>
class All {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(1.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput * old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput * old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
// Collapse the accumulated product to a 0/1 flag.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction > static_cast<T>(0.0f) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
}
};
// Accumulation reduce: arithmetic mean — sum all elements, divide by count.
template<typename T>
class Mean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
// FIX: previously "reduction / (int) n" — narrowing the 64-bit
// Nd4jLong count to int truncates (and can go negative) for buffers
// longer than INT_MAX, producing a wrong mean. Convert the count to
// T instead, matching AMean::postProcess.
return reduction / static_cast<T>(n);
}
};
// Accumulation reduce: mean of absolute values.
template<typename T>
class AMean {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
// Combine steps re-apply abs defensively; op() already yields >= 0.
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_abs<T>(opOutput) + nd4j::math::nd4j_abs<T>(old);
}
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
// Divide the accumulated |sum| by the element count.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_abs<T>(reduction) / static_cast<T>(n);
}
};
// Accumulation reduce: maximum element.
template<typename T>
class Max {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
// Seed from the first element so the reduce works for all value ranges.
op_def static T startingValue(const T *input) {
return input[0];
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(old, opOutput);
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(opOutput, old);
}
// Pairwise form (used by broadcast/pairwise kernels).
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_max<T>(d1, d2);
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_max<T>(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce: maximum absolute value.
template<typename T>
class AMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return input[0];
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput));
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(opOutput), nd4j::math::nd4j_abs<T>(old));
}
// Pairwise form returns the magnitude: max(|d1|, |d2|).
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(d1), nd4j::math::nd4j_abs<T>(d2));
}
// Two-arg form returns the ORIGINAL (signed) value whose magnitude is
// larger — note the asymmetry with the three-arg form above.
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_abs<T>(d1) > nd4j::math::nd4j_abs<T>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_abs<T>(reduction);
}
};
// Accumulation reduce: minimum absolute value.
template<typename T>
class AMin {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return input[0];
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_min<T>(nd4j::math::nd4j_abs<T>(old), nd4j::math::nd4j_abs<T>(opOutput));
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_min<T>(nd4j::math::nd4j_abs<T>(opOutput), nd4j::math::nd4j_abs<T>(old));
}
// Pairwise form returns the magnitude: min(|d1|, |d2|).
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_min(nd4j::math::nd4j_abs<T>(d1), nd4j::math::nd4j_abs<T>(d2));
}
// Two-arg form returns the ORIGINAL (signed) value whose magnitude is
// smaller — note the asymmetry with the three-arg form above.
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_abs<T>(d1) < nd4j::math::nd4j_abs<T>(d2) ? d1 : d2;
}
// FIXME: this signature overlaps with MetaOp
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_abs<T>(reduction);
}
};
// Accumulation reduce: minimum element.
template<typename T>
class Min {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
// Seed from the first element so the reduce works for all value ranges.
op_def static T startingValue(const T *input) {
return input[0];
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_min<T>(old, opOutput);
}
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_min<T>(opOutput, old);
}
// Pairwise form (used by broadcast/pairwise kernels).
op_def static T op(T d1, T d2, T *params) {
return nd4j::math::nd4j_min(d1, d2);
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_min(d1, d2);
}
// FIXME: this signature overlaps with MetaOp
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce: L1 norm — sum of absolute values.
template<typename T>
class Norm1 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_abs<T>(d1);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce: L2 norm — sqrt of the sum of squares.
template<typename T>
class Norm2 {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1 * d1;
}
// Final square root turns the accumulated sum of squares into the norm.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_sqrt<T>(reduction);
}
};
// Accumulation reduce: squared L2 norm — sum of squares, no final sqrt.
template<typename T>
class SquaredNorm {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
return d1 * d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction;
}
};
// Accumulation reduce: Frobenius norm — sqrt of the sum of squared
// magnitudes. For real element types this is numerically identical to
// Norm2 (|d|^2 == d^2); kept as a separate op for API parity.
template<typename T>
class NormFrobenius {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T *extraParams) {
T v = nd4j::math::nd4j_abs(d1);
return v * v;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_sqrt<T>(reduction);
}
};
// Accumulation reduce: general p-norm — (sum |d|^p)^(1/p), where the
// exponent p is supplied in extraParams[0].
template<typename T>
class NormP {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
// Per-element term: |d|^p.
op_def static T op(T d1, T *extraParams) {
return nd4j::math::nd4j_pow(nd4j::math::nd4j_abs(d1), extraParams[0]);
}
// Final 1/p root.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return nd4j::math::nd4j_pow(reduction, static_cast<T>(1.0f) / extraParams[0]);
}
};
// Accumulation reduce: max norm (L-infinity) — largest absolute value.
// NOTE(review): merge() sums partials while update() takes the max of
// magnitudes; summing partial maxima would overestimate the norm if
// merge() is ever used to combine lanes — confirm against the reduce
// framework's dispatch before changing it.
template<typename T>
class NormMax {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
// Keep the larger magnitude seen so far.
op_def static T update(T old, T opOutput, T *extraParams) {
return nd4j::math::nd4j_max<T>(nd4j::math::nd4j_abs<T>(old),
nd4j::math::nd4j_abs<T>(opOutput));
}
op_def static T op(T d1, T *extraParams) {
return d1;
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
// FIX: previously computed nd4j_max(abs(reduction), abs(reduction)) —
// both arguments were the same value, so the max was a no-op.
// Simplified to the equivalent single abs.
return nd4j::math::nd4j_abs<T>(reduction);
}
};
// Accumulation reduce: bias-corrected sample variance.
// extraParams[0] holds the precomputed mean; extraParams[1] holds the
// "bias" term (sum of deviations) used for the correction below.
template<typename T>
class Variance {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
// Per-element squared deviation from the precomputed mean.
op_def static T op(T d1, T *extraParams) {
T mean = extraParams[0];
T ret = d1 - mean;
return ret * ret;
}
// (sum of squared deviations - bias^2 / n) / (n - 1): corrected
// two-pass variance with Bessel's n-1 denominator.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
T bias = extraParams[1];
return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n)))
/ (n - 1);
}
};
/**
* Accumulation reduce: sample standard deviation of a buffer —
* the square root of Variance (same extraParams layout:
* [0] = precomputed mean, [1] = bias term).
*/
template<typename T>
class StandardDeviation {
public:
no_op_exec_special_accumulation
no_op_exec_special_accumulation_cuda
op_def static T startingValue(const T *input) {
return static_cast<T>(0.0f);
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
// Per-element squared deviation, identical to Variance::op.
op_def static T op(T d1, T *extraParams) {
T mean = extraParams[0];
T ret = d1 - mean;
return ret * ret;
}
// Delegate to Variance for the corrected variance, then take sqrt.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
T ret = Variance<T>::postProcess(reduction, n, extraParams);
T sqrtRet = nd4j::math::nd4j_sqrt<T>(ret);
return sqrtRet;
}
};
// Reduce3 op: cosine similarity between two buffers.
// Accumulates dot(x, y) as the main reduction while side-accumulating
// extraParams[0] = sum(x^2) and extraParams[1] = sum(y^2); postProcess
// divides by the product of the two L2 norms.
template<typename T>
class CosineSimilarity {
public:
static const int extraParamsLen = 2;
// Buffers are provided by the caller; nothing allocated here.
op_def static T *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParams) {
//delete[] extraParams;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
// dot / (||x|| * ||y||)
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1]));
}
// Per-pair step: side-accumulate squared norms, return the dot term.
op_def static T op(T d1, T d2, T *extraParams) {
extraParams[0] += d1 * d1;
extraParams[1] += d2 * d2;
return (d1 * d2);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
// CUDA path: norms are accumulated atomically because many threads
// share the same extraParams buffer.
static _CUDA_D inline T opAtomic(T d1, T d2, T *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<T>(d1 * d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<T>(d2 * d2));
return (d1 * d2);
}
#endif
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return update(old, opOutput, extraParams);
}
};
// Reduce3 op: Jaccard distance = 1 - sum(min(x,y)) / sum(max(x,y)).
// The main reduction is unused (op returns 0); the numerator and
// denominator accumulate in extraParams[0] and extraParams[1].
template<typename T>
class JaccardDistance {
public:
static const int extraParamsLen = 2;
op_def static T *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParams) {
//delete[] extraParams;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
// num / denom
return (static_cast<T>(1.0f)) - (extraParams[0] / extraParams[1]);
}
// Numerator term: element-wise min.
op_def static T num(T d1, T d2) {
return nd4j::math::nd4j_min<T>(d1, d2);
}
// Denominator term: element-wise max.
op_def static T denom(T d1, T d2) {
return nd4j::math::nd4j_max<T>(d1, d2);
}
op_def static T op(T d1, T d2, T *extraParams) {
extraParams[0] += num(d1, d2);
extraParams[1] += denom(d1, d2);
return static_cast<T>(0.0f);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
__device__
// CUDA path: atomics because threads share the extraParams buffer.
static inline T opAtomic(T d1, T d2, T *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2));
return static_cast<T>(0.0f);
}
#endif
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return update(old, opOutput, extraParams);
}
};
// Reduce3 op: normalized Hamming distance — fraction of positions where
// the two buffers differ (exact equality comparison).
template<typename T>
class SimpleHammingDistance {
public:
static const int extraParamsLen = 0;
op_def static T *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParams) {
//delete[] extraParams;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
// Divide the mismatch count by the element count.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return static_cast<T>(reduction / n);
}
// 0 when equal, 1 when different.
op_def static T op(T d1, T d2, T *extraParams) {
return (d1 == d2) ? static_cast<T>(0.0f) : static_cast<T>(1.0f);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParams) {
return op(d1, d2, extraParams);
}
#endif
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return update(old, opOutput, extraParams);
}
};
// Reduce3 op: cosine distance = 1 - cosine similarity.
// Accumulates dot(x, y) as the main reduction with
// extraParams[0] = sum(x^2), extraParams[1] = sum(y^2).
template<typename T>
class CosineDistance {
public:
static const int extraParamsLen = 2;
op_def static T *generateExtraParams() {
//T *extraParams = new T[2];
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParams) {
//delete[] extraParams;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
// 1 - dot / (||x|| * ||y||)
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParams) {
return (static_cast<T>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<T>(extraParams[0]) * nd4j::math::nd4j_sqrt<T>(extraParams[1])));
}
// abs(d)*abs(d) == d*d for real types; kept for symmetry with the
// framework's complex-capable formulation.
op_def static T op(T d1, T d2, T *extraParams) {
extraParams[0] += nd4j::math::nd4j_abs<T>(d1) * nd4j::math::nd4j_abs<T>(d1);
extraParams[1] += nd4j::math::nd4j_abs<T>(d2) * nd4j::math::nd4j_abs<T>(d2);
return (d1 * d2);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
extraParamsTotal[0] += extraParamsLocal[0];
extraParamsTotal[1] += extraParamsLocal[1];
}
#ifdef __CUDACC__
// CUDA path: atomics because threads share the extraParams buffer.
static _CUDA_D inline T opAtomic(T d1, T d2, T *extraParams) {
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<T>(d1) * nd4j::math::nd4j_abs<T>(d1));
nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<T>(d2) * nd4j::math::nd4j_abs<T>(d2));
return (d1 * d2);
}
#endif
op_def static T update(T old, T opOutput, T *extraParams) {
return old + opOutput;
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return update(old, opOutput, extraParams);
}
};
/**
* Reduce3 op: dot product between two buffers — sum(x[i] * y[i]).
* No extra parameters are needed.
*/
template<typename T>
class Dot {
public:
static const int extraParamsLen = 0;
op_def static T * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParamsRef) {
//no-op
//delete[] * extraParamsRef;
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
return reduction;
}
op_def static T op(T d1, T d2, T *extraParamsRef) {
return d1 * d2;
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static T update(T old, T opOutput, T *extraParamsRef) {
return opOutput + old;
}
op_def static T merge(T old, T opOutput, T *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};
/**
* Reduce3 op: approximate equality check — counts element pairs whose
* values differ by more than an epsilon (0 mismatches == arrays equal).
*/
template<typename T>
class EqualsWithEps {
public:
static const int extraParamsLen = 0;
op_def static T * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParamsRef) {
//no-op
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
return reduction;
}
op_def static T op(T d1, T d2, T *extraParamsRef) {
// NOTE(review): reads extraParamsRef[2] even though extraParamsLen
// is declared 0 — the epsilon is presumably passed via the launch
// parameters rather than the reduce3 side buffer; verify the caller
// actually provides at least 3 entries here.
T eps = extraParamsRef[2];
T diff = nd4j::math::nd4j_abs<T>(d1 - d2);
// works well except in the range of very large numbers
if (diff <= eps)
return static_cast<T>(0.f);
// Knuth approach
// works well except in the range of very small numbers
if (diff <= nd4j::math::nd4j_max(nd4j::math::nd4j_abs(d1), nd4j::math::nd4j_abs(d2)) * eps)
return static_cast<T>(0.f);
return static_cast<T>(1.f);
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static T update(T old, T opOutput, T *extraParamsRef) {
return opOutput + old;
}
op_def static T merge(T old, T opOutput, T *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};
// Reduce3 op: Euclidean (L2) distance — sqrt(sum((x - y)^2)).
template<typename T>
class EuclideanDistance {
public:
static const int extraParamsLen = 0;
op_def static T * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParamsRef) {
//no-op
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
// Final square root over the accumulated squared differences.
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
return nd4j::math::nd4j_sqrt<T>(reduction);
}
op_def static T op(T d1, T d2, T *extraParamsRef) {
T ret = d1 - d2;
return ret * ret;
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
op_def static T update(T old, T opOutput, T *extraParamsRef) {
return opOutput + old;
}
op_def static T merge(T old, T opOutput, T *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {}
};
// Reduce3 op: Manhattan (L1) distance — sum(|x - y|).
template<typename T>
class ManhattanDistance {
public:
static const int extraParamsLen = 0;
op_def static T * generateExtraParams() {
return nullptr;
}
op_def static void finalizeExtraParams(T *extraParamsRef) {
//no-op
}
op_def static T startingValue(T *input) {
return static_cast<T>(0.0f);
}
op_def static T postProcess(T reduction, Nd4jLong n, T *extraParamsRef) {
return reduction;
}
op_def static T op(T d1, T d2, T *extraParamsRef) {
return nd4j::math::nd4j_abs<T>(d1 - d2);
}
op_def static T update(T old, T opOutput, T *extraParamsRef) {
return old + opOutput;
}
op_def static void aggregateExtraParams(T *extraParamsTotal, T *extraParamsLocal) {
}
#ifdef __CUDACC__
__device__
static inline T opAtomic(T d1, T d2, T *extraParamsRef) {
return op(d1, d2, extraParamsRef);
}
#endif
// SIMD hint skipped on clang, which rejects this OpenMP pragma form.
#ifndef __clang__
#pragma omp declare simd uniform(extraParamsRef)
#endif
op_def static T merge(T old, T opOutput, T *extraParamsRef) {
return update(old, opOutput, extraParamsRef);
}
};
// Index-reduce op: position of the element with the largest absolute value.
template<typename T>
class IndexAbsoluteMax {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
// NOTE(review): this passes the whole IndexValue<T> to nd4j_abs<T>,
// which nominally takes a T — it appears to rely on an implicit
// conversion; the likely intent is abs of val.value with the index
// preserved. Verify against IndexValue's conversion operators before
// changing.
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
return nd4j::math::nd4j_abs<T>(val);
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Keep the candidate with the larger magnitude; ties prefer the lower
// index on CUDA to make parallel merges deterministic.
static inline functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<T>(opOutput.value);
old.value = nd4j::math::nd4j_abs<T>(old.value);
if (opOutput.value > old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// NOTE(review): returns f2 when |f1.value| > |f2.value| — the opposite
// of update()'s ordering. This inversion also exists in the sibling
// Index* ops below; confirm whether merge() is actually exercised by
// the reduce framework before "fixing".
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (nd4j::math::nd4j_abs<T>(f1.value) > nd4j::math::nd4j_abs<T>(f2.value))
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Seed below any possible |value|.
static inline T startingValue(T *input) {
return MIN_FLOAT;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
};
// Index-reduce op: smallest index whose value satisfies the
// MatchCondition predicate configured via extraParams; -1 if no match.
template<typename T>
class FirstIndex {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
return val;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
#ifdef __CUDACC__
// On CUDA, a negative index marks an uninitialized lane — ignore it.
if (opOutput.index < 0)
return old;
#endif
// Does the candidate value satisfy the configured condition?
T res = simdOps::MatchCondition<T>::op(opOutput.value, extraParams);
//printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index);
if (res == static_cast<T>(0.0f))
return old;
// No match recorded yet: take the candidate.
if (old.index < 0)
return opOutput;
// Both match: keep the smaller index.
if (old.index > opOutput.index)
return opOutput;
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline T startingValue(T *input) {
return - nd4j::DataTypeUtils::max<T>();
}
#ifdef __CUDACC__
__host__ __device__
#endif
// index = -1 is the "no match yet" sentinel.
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Prefer the smaller index when combining partials.
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (f1.index > f2.index)
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
};
// Index-reduce op: largest index whose value satisfies the
// MatchCondition predicate configured via extraParams; -1 if no match.
template<typename T>
class LastIndex {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
return val;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
#ifdef __CUDACC__
// On CUDA, a negative index marks an uninitialized lane — ignore it.
if (opOutput.index < 0)
return old;
#endif
// Does the candidate value satisfy the configured condition?
T res = simdOps::MatchCondition<T>::op(opOutput.value, extraParams);
if (res == static_cast<T>(0.0f))
return old;
// No match recorded yet: take the candidate.
if (old.index < 0)
return opOutput;
// Both match: keep the larger index.
if (old.index < opOutput.index)
return opOutput;
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline T startingValue(T *input) {
return -nd4j::DataTypeUtils::max<T>();
}
#ifdef __CUDACC__
__host__ __device__
#endif
// index = -1 is the "no match yet" sentinel.
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = -1;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Prefer the larger index when combining partials.
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (f1.index < f2.index)
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
};
// Index-reduce op: position of the maximum element (argmax).
template<typename T>
class IndexMax {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> val, T *extraParams) {
return val;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Keep the larger value; ties prefer the lower index on CUDA so
// parallel reductions are deterministic.
static functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
if (opOutput.value > old.value) {
return opOutput;
}
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// NOTE(review): returns f2 when f1.value > f2.value — the opposite of
// update()'s ordering, which looks inverted for an argmax. The same
// pattern appears in the sibling Index* ops; confirm whether merge()
// is actually exercised by the reduce framework before "fixing".
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (f1.value > f2.value)
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Seed below any representable value.
static inline T startingValue(T *input) {
return -nd4j::DataTypeUtils::max<T>();
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
};
// Index-reduce op: position of the element with the smallest absolute value.
template<typename T>
class IndexAbsoluteMin {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(
functions::indexreduce::IndexValue<T> val, T *extraParams) {
return val;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Seed above any possible |value|.
static inline T startingValue(T *input) {
return nd4j::DataTypeUtils::max<T>();
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
functions::indexreduce::IndexValue<T> local;
local.value = startingValue(input);
local.index = 0;
return local;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// Keep the candidate with the smaller magnitude; ties prefer the lower
// index on CUDA so parallel reductions are deterministic.
static inline functions::indexreduce::IndexValue<T> update(
functions::indexreduce::IndexValue<T> old,
functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
opOutput.value = nd4j::math::nd4j_abs<T>(opOutput.value);
old.value = nd4j::math::nd4j_abs<T>(old.value);
if (opOutput.value < old.value)
return opOutput;
#ifdef __CUDACC__
// workaround for cuda race condition at merge phase
else if (opOutput.value == old.value && opOutput.index < old.index)
return opOutput;
#elif defined(__GNUC__)
#endif
return old;
}
#ifdef __CUDACC__
__host__ __device__
#endif
// NOTE(review): returns f2 when |f1.value| < |f2.value| — the opposite
// of update()'s ordering. The same pattern appears in the sibling
// Index* ops; confirm whether merge() is actually exercised by the
// reduce framework before "fixing".
static inline functions::indexreduce::IndexValue<T> merge(
functions::indexreduce::IndexValue<T> f1,
functions::indexreduce::IndexValue<T> f2, T *extraParams) {
if (nd4j::math::nd4j_abs<T>(f1.value) < nd4j::math::nd4j_abs<T>(f2.value))
return f2;
return f1;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> postProcess(
functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
T *dx, int incx, T *extraParams, T *result) {
return reduction;
}
#ifdef __CUDACC__
__host__ __device__
#endif
static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
functions::indexreduce::IndexValue<T> d2, T *extraParams) {
return d1;
}
};
// Index-reduction tracking the position of the smallest element.
// Each static member is one stage of the generic index-reduce pipeline:
// seed -> op -> update -> merge -> postProcess.
template<typename T>
class IndexMin {
public:
#ifdef __CUDACC__
__host__ __device__
#endif
    // Per-element transform before reduction: identity (extraParams unused).
    static inline functions::indexreduce::IndexValue<T> op(
            functions::indexreduce::IndexValue<T> val, T *extraParams) {
        return val;
    }
#ifdef __CUDACC__
__host__ __device__
#endif
    // Neutral element: the largest representable T, so any input is smaller.
    static inline T startingValue(T *input) {
        return nd4j::DataTypeUtils::max<T>();
    }
#ifdef __CUDACC__
__host__ __device__
#endif
    // Initial (value, index) accumulator: neutral value, index 0.
    static inline functions::indexreduce::IndexValue<T> startingIndexValue(T *input) {
        functions::indexreduce::IndexValue<T> local;
        local.value = startingValue(input);
        local.index = 0;
        return local;
    }
#ifdef __CUDACC__
__host__ __device__
#endif
    // Fold one candidate into the running minimum; ties on value are broken
    // toward the lower index on CUDA to keep the merge phase deterministic.
    static inline functions::indexreduce::IndexValue<T> update(
            functions::indexreduce::IndexValue<T> old,
            functions::indexreduce::IndexValue<T> opOutput, T *extraParams) {
        if (opOutput.value < old.value)
            return opOutput;
#ifdef __CUDACC__
        // workaround for cuda race condition at merge phase
        else if (opOutput.value == old.value && opOutput.index < old.index)
            return opOutput;
#elif defined(__GNUC__)
#endif
        return old;
    }
#ifdef __CUDACC__
__host__ __device__
#endif
    // NOTE(review): returns the LARGER of the two candidates, which looks
    // inverted for a min-reduction given update() above prefers the smaller
    // value — confirm against the upstream implementation before changing.
    static inline functions::indexreduce::IndexValue<T> merge(
            functions::indexreduce::IndexValue<T> f1,
            functions::indexreduce::IndexValue<T> f2, T *extraParams) {
        if (f1.value < f2.value)
            return f2;
        return f1;
    }
#ifdef __CUDACC__
__host__ __device__
#endif
    // No post-processing: the accumulated pair is the final result.
    static inline functions::indexreduce::IndexValue<T> postProcess(
            functions::indexreduce::IndexValue<T> reduction, int n, int xOffset,
            T *dx, int incx, T *extraParams, T *result) {
        return reduction;
    }
#ifdef __CUDACC__
__host__ __device__
#endif
    // Pairwise form: identity pass-through of the first operand.
    static inline functions::indexreduce::IndexValue<T> op(functions::indexreduce::IndexValue<T> d1,
            functions::indexreduce::IndexValue<T> d2, T *extraParams) {
        return d1;
    }
};
// Extracts the variance from accumulated summary statistics.
// When bias correction is requested, the bias-corrected estimate is used
// unless it came out negative, in which case the plain (biased) variance
// serves as the fallback.
template<typename T>
class SummaryStatsVariance {
public:
    static _CUDA_HD inline T getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<T> val) {
        if (!biasCorrected)
            return val.variance();
        const T corrected = val.varianceBiasCorrected();
        return corrected < static_cast<T>(0.0f) ? val.variance() : corrected;
    }

    // Pairwise op is the identity: the actual accumulation happens elsewhere.
    static _CUDA_HD inline functions::summarystats::SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1, T *extraParams) {
        return d1;
    }
};
// Extracts the standard deviation (square root of the variance) from
// accumulated summary statistics. With bias correction requested, the
// bias-corrected variance is used unless negative, in which case the
// plain variance is the fallback.
template<typename T>
class SummaryStatsStandardDeviation {
public:
    static _CUDA_HD inline T getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<T> val) {
        if (!biasCorrected)
            return nd4j::math::nd4j_sqrt(val.variance());
        const T corrected = val.varianceBiasCorrected();
        if (corrected < static_cast<T>(0.0f))
            return nd4j::math::nd4j_sqrt(val.variance());
        return nd4j::math::nd4j_sqrt(corrected);
    }

    // Pairwise op is the identity: the actual accumulation happens elsewhere.
    static _CUDA_HD inline functions::summarystats::SummaryStatsData<T> op(functions::summarystats::SummaryStatsData<T> d1, T *extraParams) {
        return d1;
    }
};
// Randomized dropout: zeroes the input with probability (1 - prob).
// params[0] = prob — the value is KEPT when the random draw falls below it.
template<typename T>
class DropOut {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    inline _CUDA_D static T op(T d1, T *params) {
        T prob = params[0];
#ifdef __CUDACC__
        // Cheap per-thread pseudo-random draw derived from the clock and the
        // thread id (no RNG state is available in this code path).
        T length = params[1];
        T tid = gridDim.x * blockDim.x + threadIdx.x;
        T rnd = nd4j::math::nd4j_abs<T>(nd4j::math::nd4j_cos<T>(static_cast<T>(clock64()) * static_cast<T>(tid) + static_cast<T>(length) * static_cast<T>(tid)));
#else
        // BUG FIX: the original computed rand() / RAND_MAX with INTEGER
        // division, which yields 0 for virtually every draw, so the CPU path
        // never dropped anything. Cast before dividing to get a uniform
        // value in [0, 1].
        T rnd = static_cast<T>(rand()) / static_cast<T>(RAND_MAX);
#endif
        return rnd >= prob ? static_cast<T>(0.0f) : d1;
    }
};
// Inverted dropout: zeroes the input with probability (1 - prob) and scales
// surviving values by 1/prob so the expected activation is preserved.
// params[0] = prob (keep probability).
template<typename T>
class DropOutInverted {
public:
    no_op_exec_special
    no_op_exec_special_cuda

#ifdef __CUDACC__
    __device__
#endif
    inline static T op(T d1, T *params) {
        T prob = params[0];
#ifdef __CUDACC__
        // Cheap per-thread pseudo-random draw derived from the clock and the
        // thread id (no RNG state is available in this code path).
        T length = params[1];
        T tid = gridDim.x * blockDim.x + threadIdx.x;
        T rnd = nd4j::math::nd4j_abs<T>(nd4j::math::nd4j_cos<T>(static_cast<T>(clock64()) * static_cast<T>(tid) + static_cast<T>(length) * static_cast<T>(tid)));
#else
        // BUG FIX: rand() / RAND_MAX used integer division in the original,
        // which is 0 for virtually every draw, so dropout never fired on the
        // CPU path. Cast before dividing to get a uniform value in [0, 1].
        T rnd = static_cast<T>(rand()) / static_cast<T>(RAND_MAX);
#endif
        return rnd >= prob ? static_cast<T>(0.0f) : d1 / prob;
    }
};
// Replaces NaN entries with params[0]; all other values pass through.
template<typename T>
class ReplaceNans {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    op_def static T op(T d1, T *params) {
        const T substitute = params[0];
        if (nd4j::math::nd4j_isnan(d1))
            return substitute;
        return d1;
    }
};
// this op is used for conditional pairwise transforms only
template<typename T>
class CompareAndReplace {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    // op definition for PairWise Transform: d1 is replaced by d2 whenever d1
    // satisfies the condition selected by mode, otherwise d1 is kept.
    // params: [0] = compare value, [2] = epsilon, [3] = comparison mode.
    op_def static T op(T d1, T d2, T *params) {
        T compare = params[0];
        T eps = params[2];
        int mode = (int) params[3];
        switch (mode) {
            case 0:  // equals (within eps)
                return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? d2 : d1;
            case 1:  // not equals (within eps)
                return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? d2 : d1;
            case 2:  // less_than
                return d1 < compare ? d2 : d1;
            case 3:  // greater_than
                return d1 > compare ? d2 : d1;
            case 4:  // less_or_equals_than
                return d1 <= compare ? d2 : d1;
            case 5:  // greater_or_equals_than
                return d1 >= compare ? d2 : d1;
            case 6:  // abs_less_than
                return nd4j::math::nd4j_abs<T>(d1) < compare ? d2 : d1;
            case 7:  // abs_greater_than
                return nd4j::math::nd4j_abs<T>(d1) > compare ? d2 : d1;
            case 8:  // is inf
                return nd4j::math::nd4j_isinf(d1) ? d2 : d1;
            case 9:  // is nan
                return nd4j::math::nd4j_isnan(d1) ? d2 : d1;
            case 10: // equals (exact)
                return d1 == compare ? d2 : d1;
            case 11: // not equals (exact)
                return d1 != compare ? d2 : d1;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<T>(d1) >= compare ? d2 : d1;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<T>(d1) <= compare ? d2 : d1;
            default:
                printf("Undefined boolean operation: [%i]\n", mode);
                return d1;
        }
    }
};
// Conditional set: writes a replacement value wherever the tested operand
// satisfies the comparison selected by mode; otherwise the input is kept.
// params layout: [0] = compare, [1] = set value (transform overload only),
// [2] = epsilon for the eps-based modes, [3] = comparison mode.
// Modes: 0 eq(eps), 1 neq(eps), 2 lt, 3 gt, 4 le, 5 ge, 6 abs_lt,
// 7 abs_gt, 8 isinf, 9 isnan, 10 eq, 11 neq, 12 abs_ge, 13 abs_le.
template<typename T>
class CompareAndSet {
public:
    no_op_exec_special
    no_op_exec_special_cuda

    // op definition for Transform: tests d1 and returns params[1] on match.
    // with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise
    op_def static T op(T d1, T *params) {
        T compare = params[0];
        T set = params[1];
        T eps = params[2];
        int mode = (int) params[3];
        switch (mode) {
            case 0:  // equals
                return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1;
            case 1:  // not equals
                return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1;
            case 2:  // less_than
                return d1 < compare ? set : d1;
            case 3:  // greater_than
                return d1 > compare ? set : d1;
            case 4:  // less_or_equals_than
                return d1 <= compare ? set : d1;
            case 5:  // greater_or_equals_than
                return d1 >= compare ? set : d1;
            case 6:  // abs_less_than
                return nd4j::math::nd4j_abs<T>(d1) < compare ? set : d1;
            case 7:  // abs_greater_than
                return nd4j::math::nd4j_abs<T>(d1) > compare ? set : d1;
            case 8:  // is inf
                return nd4j::math::nd4j_isinf(d1) ? set : d1;
            case 9:  // is nan
                return nd4j::math::nd4j_isnan(d1) ? set : d1;
            case 10: // equals (exact)
                return d1 == compare ? set : d1;
            case 11: // not equals (exact)
                return d1 != compare ? set : d1;
            case 12: // abs_greater_or_equals_than
                return nd4j::math::nd4j_abs<T>(d1) >= compare ? set : d1;
            case 13: // abs_less_or_equals_than
                return nd4j::math::nd4j_abs<T>(d1) <= compare ? set : d1;
            default:
                printf("Undefined boolean operation: [%i]\n", mode);
                return d1;
        }
    }

    // op definition for PairWise Transform: tests d2 and returns d2 on
    // match, keeping d1 otherwise.
    op_def static T op(T d1, T d2, T *params) {
        T compare = params[0];
        T eps = params[2];
        int mode = (int) params[3];
        switch (mode) {
            case 0:  // equals
                return nd4j::math::nd4j_abs<T>(d2 - compare) <= eps ? d2 : d1;
            case 1:  // not equals
                return nd4j::math::nd4j_abs<T>(d2 - compare) > eps ? d2 : d1;
            case 2:  // less_than
                return d2 < compare ? d2 : d1;
            case 3:  // greater_than
                return d2 > compare ? d2 : d1;
            case 4:  // less_or_equals_than
                return d2 <= compare ? d2 : d1;
            case 5:  // greater_or_equals_than
                return d2 >= compare ? d2 : d1;
            case 6:  // abs_less_than
                return nd4j::math::nd4j_abs<T>(d2) < compare ? d2 : d1;
            case 7:  // abs_greater_than
                return nd4j::math::nd4j_abs<T>(d2) > compare ? d2 : d1;
            case 8:  // is inf
                return nd4j::math::nd4j_isinf(d2) ? d2 : d1;
            case 9:  // is nan
                return nd4j::math::nd4j_isnan(d2) ? d2 : d1;
            case 10: // equals (exact)
                return d2 == compare ? d2 : d1;
            case 11: // not equals (exact)
                return d2 != compare ? d2 : d1;
            case 12: // abs_greater_or_equals_than
                // BUG FIX: the original tested d1 here (copy-paste from
                // CompareAndReplace) while every other mode tests d2.
                return nd4j::math::nd4j_abs<T>(d2) >= compare ? d2 : d1;
            case 13: // abs_less_or_equals_than
                // BUG FIX: same d1 -> d2 correction as mode 12.
                return nd4j::math::nd4j_abs<T>(d2) <= compare ? d2 : d1;
            default:
                printf("Undefined boolean operation: [%i]\n", mode);
                return d1;
        }
    }
};
}
#endif
|
DelayedUpdate.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2019 QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_DELAYED_UPDATE_H
#define QMCPLUSPLUS_DELAYED_UPDATE_H
#include <OhmmsPETE/OhmmsVector.h>
#include <OhmmsPETE/OhmmsMatrix.h>
#include "Numerics/OhmmsBlas.h"
#include "QMCWaveFunctions/Fermion/DiracMatrix.h"
#include "Numerics/BlasThreadingEnv.h"
#include "config.h"
namespace qmcplusplus
{
/** implements delayed update on CPU using BLAS
* @tparam T base precision for most computation
* @tparam T_FP high precision for matrix inversion, T_FP >= T
*/
template<typename T, typename T_FP>
class DelayedUpdate
{
  /// orbital values of delayed electrons
  Matrix<T> U;
  /// rows of Ainv corresponding to delayed electrons
  Matrix<T> V;
  /// Matrix inverse of B, at maximum KxK
  Matrix<T> Binv;
  /// scratch space, used during inverse update
  Matrix<T> tempMat;
  /// temporal scratch space used by SM-1
  Vector<T> temp;
  /// new column of B
  Vector<T> p;
  /// list of delayed electrons
  std::vector<int> delay_list;
  /// current number of delays, increase one for each acceptance, reset to 0 after updating Ainv
  int delay_count;
  /// matrix inversion engine
  DiracMatrix<T_FP> detEng;

public:
  /// default constructor
  DelayedUpdate() : delay_count(0) {}

  /** resize the internal storage
   * @param norb number of electrons/orbitals
   * @param delay, maximum delay 0<delay<=norb
   */
  inline void resize(int norb, int delay)
  {
    V.resize(delay, norb);
    U.resize(delay, norb);
    p.resize(delay);
    temp.resize(norb);
    tempMat.resize(norb, delay);
    Binv.resize(delay, delay);
    delay_list.resize(delay);
  }

  /** compute the inverse of the transpose of matrix A
   * @param logdetT orbital value matrix
   * @param Ainv inverse matrix
   * @param LogValue log-determinant accumulator filled by the inversion engine
   */
  template<typename TREAL>
  inline void invert_transpose(const Matrix<T>& logdetT, Matrix<T>& Ainv, std::complex<TREAL>& LogValue)
  {
    detEng.invert_transpose(logdetT, Ainv, LogValue);
    // safe mechanism: a fresh inverse invalidates any pending delayed rows
    delay_count = 0;
  }

  /** initialize internal objects when Ainv is refreshed
   * @param Ainv inverse matrix
   */
  inline void initializeInv(const Matrix<T>& Ainv)
  {
    // safe mechanism: discard pending delayed updates; Ainv is fresh
    delay_count = 0;
  }

  /// number of accepted-but-not-yet-applied row updates
  inline int getDelayCount() const { return delay_count; }

  /** compute the row of up-to-date Ainv
   * @param Ainv inverse matrix
   * @param rowchanged the row id corresponding to the proposed electron
   * @param invRow output: the requested row with all delayed updates applied
   */
  template<typename VVT>
  inline void getInvRow(const Matrix<T>& Ainv, int rowchanged, VVT& invRow)
  {
    if (delay_count == 0)
    {
      // Ainv is fresh, directly access Ainv
      std::copy_n(Ainv[rowchanged], invRow.size(), invRow.data());
      return;
    }
    const T cone(1);
    const T czero(0);
    const int norb = Ainv.rows();
    const int lda_Binv = Binv.cols();
    // save Ainv[rowchanged] to invRow
    std::copy_n(Ainv[rowchanged], norb, invRow.data());
    // multiply V (NxK) Binv(KxK) U(KxN) invRow right to the left:
    // p = U^T * invRow; Binv[delay_count] (scratch row) = Binv * p;
    // invRow -= V * scratch — i.e. apply the accumulated low-rank correction.
    BLAS::gemv('T', norb, delay_count, cone, U.data(), norb, invRow.data(), 1, czero, p.data(), 1);
    BLAS::gemv('N', delay_count, delay_count, cone, Binv.data(), lda_Binv, p.data(), 1, czero, Binv[delay_count], 1);
    BLAS::gemv('N', norb, delay_count, -cone, V.data(), norb, Binv[delay_count], 1, cone, invRow.data(), 1);
  }

  /** accept a move with the update delayed
   * @param Ainv inverse matrix
   * @param rowchanged the row id corresponding to the proposed electron
   * @param psiV new orbital values
   *
   * Before delay_count reaches the maximum delay, only Binv is updated with a recursive algorithm
   */
  template<typename VVT>
  inline void acceptRow(Matrix<T>& Ainv, int rowchanged, const VVT& psiV)
  {
    const T cminusone(-1);
    const T czero(0);
    const int norb = Ainv.rows();
    const int lda_Binv = Binv.cols();
    // record the affected Ainv row and the new orbital values for this delay slot
    std::copy_n(Ainv[rowchanged], norb, V[delay_count]);
    std::copy_n(psiV.data(), norb, U[delay_count]);
    delay_list[delay_count] = rowchanged;
    // the new Binv is [[X Y] [Z x]]
    // p = -V * psiV over all delay_count+1 recorded rows
    BLAS::gemv('T', norb, delay_count + 1, cminusone, V.data(), norb, psiV.data(), 1, czero, p.data(), 1);
    // x: new bottom-right scalar of Binv (stored as its reciprocal below)
    T y = -p[delay_count];
    for (int i = 0; i < delay_count; i++)
      y += Binv[delay_count][i] * p[i];
    Binv[delay_count][delay_count] = y = T(1) / y;
    // Y: new last column of Binv
    BLAS::gemv('T', delay_count, delay_count, y, Binv.data(), lda_Binv, p.data(), 1, czero, Binv.data() + delay_count,
               lda_Binv);
    // X: rank-1 correction of the existing KxK part
    BLAS::ger(delay_count, delay_count, cminusone, Binv[delay_count], 1, Binv.data() + delay_count, lda_Binv,
              Binv.data(), lda_Binv);
    // Z: new last row of Binv
    for (int i = 0; i < delay_count; i++)
      Binv[delay_count][i] *= -y;
    delay_count++;
    // update Ainv when maximal delay is reached
    if (delay_count == lda_Binv)
      updateInvMat(Ainv);
  }

  /** update the full Ainv and reset delay_count
   * @param Ainv inverse matrix
   */
  inline void updateInvMat(Matrix<T>& Ainv)
  {
    if (delay_count == 0)
      return;
    // update the inverse matrix
    const T cone(1);
    const T czero(0);
    const int norb = Ainv.rows();
    if (delay_count == 1)
    {
      // this is a special case invoking the Fahy's variant of Sherman-Morrison update.
      // Only use the first norb elements of tempMat as a temporal array
      BLAS::gemv('T', norb, norb, cone, Ainv.data(), norb, U[0], 1, czero, temp.data(), 1);
      temp[delay_list[0]] -= cone;
      BLAS::ger(norb, norb, -Binv[0][0], V[0], 1, temp.data(), 1, Ainv.data(), norb);
    }
    else
    {
      const int lda_Binv = Binv.cols();
      // number of threads at the next level, forced to 1 if the problem is small.
      const int num_threads = (norb < 256 ? 1 : getNextLevelNumThreads());
      if (num_threads == 1 || BlasThreadingEnv::NestedThreadingSupported())
      {
        // threading depends on BLAS
        BlasThreadingEnv knob(num_threads);
        BLAS::gemm('T', 'N', delay_count, norb, norb, cone, U.data(), norb, Ainv.data(), norb, czero, tempMat.data(),
                   lda_Binv);
        for (int i = 0; i < delay_count; i++)
          tempMat(delay_list[i], i) -= cone;
        BLAS::gemm('N', 'N', norb, delay_count, delay_count, cone, V.data(), norb, Binv.data(), lda_Binv, czero,
                   U.data(), norb);
        BLAS::gemm('N', 'N', norb, norb, delay_count, -cone, U.data(), norb, tempMat.data(), lda_Binv, cone,
                   Ainv.data(), norb);
      }
      else
      {
        // manually threaded version of the above GEMM calls
#pragma omp parallel
        {
          // each thread owns contiguous blocks of rows/columns of size block_size
          const int block_size = getAlignedSize<T>((norb + num_threads - 1) / num_threads);
          int num_block = (norb + block_size - 1) / block_size;
#pragma omp for
          for (int ix = 0; ix < num_block; ix++)
          {
            int x_offset = ix * block_size;
            BLAS::gemm('T', 'N', delay_count, std::min(norb - x_offset, block_size), norb, cone, U.data(), norb,
                       Ainv[x_offset], norb, czero, tempMat[x_offset], lda_Binv);
          }
#pragma omp master
          for (int i = 0; i < delay_count; i++)
            tempMat(delay_list[i], i) -= cone;
#pragma omp for
          for (int iy = 0; iy < num_block; iy++)
          {
            int y_offset = iy * block_size;
            BLAS::gemm('N', 'N', std::min(norb - y_offset, block_size), delay_count, delay_count, cone,
                       V.data() + y_offset, norb, Binv.data(), lda_Binv, czero, U.data() + y_offset, norb);
          }
#pragma omp for collapse(2) nowait
          for (int iy = 0; iy < num_block; iy++)
            for (int ix = 0; ix < num_block; ix++)
            {
              int x_offset = ix * block_size;
              int y_offset = iy * block_size;
              BLAS::gemm('N', 'N', std::min(norb - y_offset, block_size), std::min(norb - x_offset, block_size),
                         delay_count, -cone, U.data() + y_offset, norb, tempMat[x_offset], lda_Binv, cone,
                         Ainv[x_offset] + y_offset, norb);
            }
        }
      }
    }
    delay_count = 0;
  }
};
} // namespace qmcplusplus
#endif // QMCPLUSPLUS_DELAYED_UPDATE_H
|
GB_unop__identity_int32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_fc32
// op(A') function: GB_unop_tran__identity_int32_fc32
// C type: int32_t
// A type: GxB_FC32_t
// cast: int32_t cij = GB_cast_to_int32_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity unary op with a cast from
// single-precision complex to int32 (real part only; see GB_CAST above).
// This file is auto-generated — keep edits limited to the generator.
GrB_Info GB_unop_apply__identity_int32_fc32
(
    int32_t *Cx,                    // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            int32_t z = GB_cast_to_int32_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            int32_t z = GB_cast_to_int32_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary op. The
// actual loop body lives in the shared template GB_unop_transpose.c,
// driven by the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__identity_int32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
|
diagsm_x_coo_u_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
/* Unit-diagonal triangular solve, dense multi-column right-hand side:
 * with a unit diagonal the solution is simply y = alpha * x, element-wise
 * over all `columns` dense columns. The COO matrix A contributes only its
 * row count. Columns are processed in parallel when OpenMP is enabled. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT thread_count = alpha_get_thread_num();
    const ALPHA_INT row_count = A->rows;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
    for (ALPHA_INT col = 0; col < columns; ++col)
    {
        for (ALPHA_INT row = 0; row < row_count; ++row)
            alpha_mul(y[index2(col, row, ldy)], alpha, x[index2(col, row, ldx)]);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.